From pypy.commits at gmail.com Sat Jul 1 05:49:28 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 01 Jul 2017 02:49:28 -0700 (PDT) Subject: [pypy-commit] cffi default: Document NetBSD Message-ID: <59577028.8ea3df0a.4012d.5e62@mx.google.com> Author: Armin Rigo Branch: Changeset: r2991:67f818fdf4c7 Date: 2017-07-01 11:49 +0200 http://bitbucket.org/cffi/cffi/changeset/67f818fdf4c7/ Log: Document NetBSD diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -196,3 +196,14 @@ This will compile and install CFFI in this virtualenv, using the Python from this virtualenv. + + +NetBSD +++++++ + +Reports on NetBSD are not good. You first need to make sure you have an +up-to-date version of libffi, which fixes some bugs. However, there are +still a number of segfaults and failures running the CFFI tests (see +`issue 321`__). Contributions welcome. + +.. __: https://bitbucket.org/cffi/cffi/issues/321/cffi-191-segmentation-fault-during-self From pypy.commits at gmail.com Sat Jul 1 08:58:58 2017 From: pypy.commits at gmail.com (mattip) Date: Sat, 01 Jul 2017 05:58:58 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-injection: fix from merge, restore injection typedef override Message-ID: <59579c92.58abdf0a.bd1ce.9344@mx.google.com> Author: Matti Picus Branch: cpyext-injection Changeset: r91663:ed5b90a93109 Date: 2017-06-30 17:12 -0400 http://bitbucket.org/pypy/pypy/changeset/ed5b90a93109/ Log: fix from merge, restore injection typedef override diff --git a/pypy/module/cpyext/injection/numpy.py b/pypy/module/cpyext/injection/numpy.py --- a/pypy/module/cpyext/injection/numpy.py +++ b/pypy/module/cpyext/injection/numpy.py @@ -52,7 +52,7 @@ return w_obj @bootstrap_function -def init_mything(space): +def init_arrayobject(space): make_typedescr(W_ArrayObject.typedef, basestruct=PyArrayObject.TO, realize=array_realize) diff --git a/pypy/module/cpyext/typeobject.py 
b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -506,6 +506,8 @@ W_TypeObject.__init__(self, space, name, bases_w or [space.w_object], dict_w, force_new_layout=new_layout, is_heaptype=flag_heaptype) + if newtypedef is not None: + self.layout.typedef = newtypedef self.flag_cpytype = True # if a sequence or a mapping, then set the flag to force it if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item: From pypy.commits at gmail.com Sat Jul 1 08:59:06 2017 From: pypy.commits at gmail.com (mattip) Date: Sat, 01 Jul 2017 05:59:06 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-injection: merge default into branch Message-ID: <59579c9a.a1abdf0a.eb0f0.7e4c@mx.google.com> Author: Matti Picus Branch: cpyext-injection Changeset: r91664:36d878c36842 Date: 2017-06-30 18:26 -0400 http://bitbucket.org/pypy/pypy/changeset/36d878c36842/ Log: merge default into branch diff too long, truncating to 2000 out of 60621 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -1,6 +1,6 @@ syntax: glob *.py[co] -*.sw[po] +*.sw[pon] *~ .*.swp .idea @@ -8,6 +8,8 @@ .pydevproject __pycache__ +.cache/ +.gdb_history syntax: regexp ^testresult$ ^site-packages$ @@ -49,6 +51,11 @@ ^rpython/translator/goal/target.+-c$ ^rpython/translator/goal/.+\.exe$ ^rpython/translator/goal/.+\.dll$ +^rpython/rlib/rvmprof/src/shared/libbacktrace/Makefile$ +^rpython/rlib/rvmprof/src/shared/libbacktrace/config.guess$ +^rpython/rlib/rvmprof/src/shared/libbacktrace/config.h$ +^rpython/rlib/rvmprof/src/shared/libbacktrace/config.log$ +^rpython/rlib/rvmprof/src/shared/libbacktrace/config.status$ ^pypy/goal/pypy-translation-snapshot$ ^pypy/goal/pypy-c ^pypy/goal/.+\.exe$ @@ -80,6 +87,7 @@ .hypothesis/ ^release/ ^rpython/_cache$ -^\.cache$ pypy/module/cppyy/.+/*\.pcm + + diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -36,3 +36,7 @@ aff251e543859ce4508159dd9f1a82a2f553de00 release-pypy2.7-v5.6.0 
fa3249d55d15b9829e1be69cdf45b5a44cec902d release-pypy2.7-v5.7.0 b16a4363e930f6401bceb499b9520955504c6cb0 release-pypy3.5-v5.7.0 +1aa2d8e03cdfab54b7121e93fda7e98ea88a30bf release-pypy2.7-v5.7.1 +2875f328eae2216a87f3d6f335092832eb031f56 release-pypy3.5-v5.7.1 +c925e73810367cd960a32592dd7f728f436c125c release-pypy2.7-v5.8.0 +a37ecfe5f142bc971a86d17305cc5d1d70abec64 release-pypy3.5-v5.8.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -39,11 +39,11 @@ Armin Rigo Maciej Fijalkowski - Carl Friedrich Bolz + Carl Friedrich Bolz-Tereick Amaury Forgeot d'Arc Antonio Cuni + Matti Picus Samuele Pedroni - Matti Picus Ronan Lamy Alex Gaynor Philip Jenvey @@ -101,28 +101,28 @@ Vincent Legoll Michael Foord Stephan Diehl + Stefano Rivera Stefan Schwarzer Tomek Meka Valentino Volonghi - Stefano Rivera Patrick Maupin Devin Jeanpierre Bob Ippolito Bruno Gola David Malcolm Jean-Paul Calderone + Squeaky Edd Barrett - Squeaky Timo Paulssen Marius Gedminas Alexandre Fayolle Simon Burton Nicolas Truessel Martin Matusiak + Laurence Tratt Wenzhu Man Konstantin Lopuhin John Witulski - Laurence Tratt Greg Price Ivan Sichmann Freitas Dario Bertini @@ -149,13 +149,13 @@ Stian Andreassen Wanja Saatkamp Mike Blume + Joannah Nanjekye Gerald Klix Oscar Nierstrasz Rami Chowdhury Stefan H. 
Muller - Joannah Nanjekye + Tim Felgentreff Eugene Oden - Tim Felgentreff Jeff Terrace Henry Mason Vasily Kuznetsov @@ -164,11 +164,11 @@ Dusty Phillips Lukas Renggli Guenter Jantzen + Jasper Schulz Ned Batchelder Amit Regmi Anton Gulenko Sergey Matyunin - Jasper Schulz Andrew Chambers Nicolas Chauvat Andrew Durdin @@ -183,6 +183,7 @@ Gintautas Miliauskas Lucian Branescu Mihaila anatoly techtonik + Dodan Mihai Karl Bartel Gabriel Lavoie Jared Grubb @@ -220,12 +221,14 @@ Vaibhav Sood Reuben Cummings Attila Gobi + Alecsandru Patrascu Christopher Pope Tristan Arthur Christian Tismer Dan Stromberg Carl Meyer Florin Papa + Jens-Uwe Mager Valentina Mukhamedzhanova Stefano Parmesan touilleMan @@ -264,7 +267,6 @@ Dan Buch Lene Wagner Tomo Cocoa - Alecsandru Patrascu David Lievens Neil Blakey-Milner Henrik Vendelbo @@ -303,6 +305,7 @@ Anna Katrina Dominguez Kim Jin Su Amber Brown + Nate Bragg Ben Darnell Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -340,11 +343,13 @@ Jim Hunziker shoma hosaka Buck Golemon + Iraklis D. JohnDoe yrttyr Michael Chermside Anna Ravencroft remarkablerocket + Petre Vijiac Berker Peksag Christian Muirhead soareschen diff --git a/include/README b/include/README --- a/include/README +++ b/include/README @@ -1,7 +1,11 @@ This directory contains all the include files needed to build cpython extensions with PyPy. Note that these are just copies of the original headers -that are in pypy/module/cpyext/include: they are automatically copied from -there during translation. +that are in pypy/module/cpyext/{include,parse}: they are automatically copied +from there during translation. -Moreover, pypy_decl.h and pypy_macros.h are automatically generated, also -during translation. +Moreover, some pypy-specific files are automatically generated, also during +translation. 
Currently they are: +* pypy_decl.h +* pypy_macros.h +* pypy_numpy.h +* pypy_structmember_decl.h diff --git a/lib-python/2.7/ctypes/test/test_unaligned_structures.py b/lib-python/2.7/ctypes/test/test_unaligned_structures.py --- a/lib-python/2.7/ctypes/test/test_unaligned_structures.py +++ b/lib-python/2.7/ctypes/test/test_unaligned_structures.py @@ -37,7 +37,10 @@ for typ in byteswapped_structures: ## print >> sys.stderr, typ.value self.assertEqual(typ.value.offset, 1) - o = typ() + try: + o = typ() + except NotImplementedError as e: + self.skipTest(str(e)) # for PyPy o.value = 4 self.assertEqual(o.value, 4) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -61,12 +61,12 @@ def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} - g['CC'] = "gcc -pthread" - g['CXX'] = "g++ -pthread" + g['CC'] = "cc -pthread" + g['CXX'] = "c++ -pthread" g['OPT'] = "-DNDEBUG -O2" g['CFLAGS'] = "-DNDEBUG -O2" g['CCSHARED'] = "-fPIC" - g['LDSHARED'] = "gcc -pthread -shared" + g['LDSHARED'] = "cc -pthread -shared" g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] g['AR'] = "ar" g['ARFLAGS'] = "rc" diff --git a/lib-python/2.7/warnings.py b/lib-python/2.7/warnings.py --- a/lib-python/2.7/warnings.py +++ b/lib-python/2.7/warnings.py @@ -309,9 +309,12 @@ def __init__(self, message, category, filename, lineno, file=None, line=None): - local_values = locals() - for attr in self._WARNING_DETAILS: - setattr(self, attr, local_values[attr]) + self.message = message + self.category = category + self.filename = filename + self.lineno = lineno + self.file = file + self.line = line self._category_name = category.__name__ if category else None def __str__(self): diff --git a/lib-python/2.7/zipfile.py b/lib-python/2.7/zipfile.py --- a/lib-python/2.7/zipfile.py +++ b/lib-python/2.7/zipfile.py @@ 
-622,19 +622,23 @@ """Read and return up to n bytes. If the argument is omitted, None, or negative, data is read and returned until EOF is reached.. """ - buf = '' + # PyPy modification: don't do repeated string concatenation + buf = [] + lenbuf = 0 if n is None: n = -1 while True: if n < 0: data = self.read1(n) - elif n > len(buf): - data = self.read1(n - len(buf)) + elif n > lenbuf: + data = self.read1(n - lenbuf) else: - return buf + break if len(data) == 0: - return buf - buf += data + break + lenbuf += len(data) + buf.append(data) + return "".join(buf) def _update_crc(self, newdata, eof): # Update the CRC using the given data. diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -76,17 +76,22 @@ return self._type_._alignmentofinstances() def _CData_output(self, resarray, base=None, index=-1): - # this seems to be a string if we're array of char, surprise! - from ctypes import c_char, c_wchar - if self._type_ is c_char: - return _rawffi.charp2string(resarray.buffer, self._length_) - if self._type_ is c_wchar: - return _rawffi.wcharp2unicode(resarray.buffer, self._length_) + from _rawffi.alt import types + # If a char_p or unichar_p is received, skip the string interpretation + if base._ffiargtype != types.Pointer(types.char_p) and \ + base._ffiargtype != types.Pointer(types.unichar_p): + # this seems to be a string if we're array of char, surprise! 
+ from ctypes import c_char, c_wchar + if self._type_ is c_char: + return _rawffi.charp2string(resarray.buffer, self._length_) + if self._type_ is c_wchar: + return _rawffi.wcharp2unicode(resarray.buffer, self._length_) res = self.__new__(self) ffiarray = self._ffiarray.fromaddress(resarray.buffer, self._length_) res._buffer = ffiarray - res._base = base - res._index = index + if base is not None: + res._base = base + res._index = index return res def _CData_retval(self, resbuffer): diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -64,8 +64,9 @@ res = object.__new__(self) res.__class__ = self res.__dict__['_buffer'] = resbuffer - res.__dict__['_base'] = base - res.__dict__['_index'] = index + if base is not None: + res.__dict__['_base'] = base + res.__dict__['_index'] = index return res def _CData_retval(self, resbuffer): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -1,4 +1,3 @@ - from _ctypes.basics import _CData, _CDataMeta, cdata_from_address from _ctypes.primitive import SimpleType, _SimpleCData from _ctypes.basics import ArgumentError, keepalive_key @@ -9,13 +8,16 @@ import sys import traceback -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f + +try: + from __pypy__ import builtinify +except ImportError: + builtinify = lambda f: f # XXX this file needs huge refactoring I fear -PARAMFLAG_FIN = 0x1 -PARAMFLAG_FOUT = 0x2 +PARAMFLAG_FIN = 0x1 +PARAMFLAG_FOUT = 0x2 PARAMFLAG_FLCID = 0x4 PARAMFLAG_COMBINED = PARAMFLAG_FIN | PARAMFLAG_FOUT | PARAMFLAG_FLCID @@ -24,9 +26,9 @@ PARAMFLAG_FIN, PARAMFLAG_FIN | PARAMFLAG_FOUT, PARAMFLAG_FIN | PARAMFLAG_FLCID - ) +) -WIN64 = sys.platform == 'win32' and sys.maxint == 2**63 - 1 +WIN64 = sys.platform == 'win32' and sys.maxint == 2 ** 63 - 1 def get_com_error(errcode, riid, pIunk): @@ -35,6 +37,7 @@ from 
_ctypes import COMError return COMError(errcode, None, None) + @builtinify def call_function(func, args): "Only for debugging so far: So that we can call CFunction instances" @@ -94,14 +97,9 @@ "item %d in _argtypes_ has no from_param method" % ( i + 1,)) self._argtypes_ = list(argtypes) - self._check_argtypes_for_fastpath() + argtypes = property(_getargtypes, _setargtypes) - def _check_argtypes_for_fastpath(self): - if all([hasattr(argtype, '_ffiargshape_') for argtype in self._argtypes_]): - fastpath_cls = make_fastpath_subclass(self.__class__) - fastpath_cls.enable_fastpath_maybe(self) - def _getparamflags(self): return self._paramflags @@ -126,27 +124,26 @@ raise TypeError( "paramflags must be a sequence of (int [,string [,value]]) " "tuples" - ) + ) if not isinstance(flag, int): raise TypeError( "paramflags must be a sequence of (int [,string [,value]]) " "tuples" - ) + ) _flag = flag & PARAMFLAG_COMBINED if _flag == PARAMFLAG_FOUT: typ = self._argtypes_[idx] if getattr(typ, '_ffiargshape_', None) not in ('P', 'z', 'Z'): raise TypeError( "'out' parameter %d must be a pointer type, not %s" - % (idx+1, type(typ).__name__) - ) + % (idx + 1, type(typ).__name__) + ) elif _flag not in VALID_PARAMFLAGS: raise TypeError("paramflag value %d not supported" % flag) self._paramflags = paramflags paramflags = property(_getparamflags, _setparamflags) - def _getrestype(self): return self._restype_ @@ -156,7 +153,7 @@ from ctypes import c_int restype = c_int if not (isinstance(restype, _CDataMeta) or restype is None or - callable(restype)): + callable(restype)): raise TypeError("restype must be a type, a callable, or None") self._restype_ = restype @@ -168,15 +165,18 @@ def _geterrcheck(self): return getattr(self, '_errcheck_', None) + def _seterrcheck(self, errcheck): if not callable(errcheck): raise TypeError("The errcheck attribute must be callable") self._errcheck_ = errcheck + def _delerrcheck(self): try: del self._errcheck_ except AttributeError: pass + errcheck = 
property(_geterrcheck, _seterrcheck, _delerrcheck) def _ffishapes(self, args, restype): @@ -188,7 +188,7 @@ raise TypeError("invalid result type for callback function") restype = restype._ffiargshape_ else: - restype = 'O' # void + restype = 'O' # void return argtypes, restype def _set_address(self, address): @@ -201,7 +201,7 @@ def __init__(self, *args): self.name = None - self._objects = {keepalive_key(0):self} + self._objects = {keepalive_key(0): self} self._needs_free = True # Empty function object -- this is needed for casts @@ -222,10 +222,8 @@ if self._argtypes_ is None: self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) - self._check_argtypes_for_fastpath() return - # A callback into python if callable(argument) and not argsl: self.callable = argument @@ -259,7 +257,7 @@ if (sys.platform == 'win32' and isinstance(argument, (int, long)) and argsl): ffiargs, ffires = self._ffishapes(self._argtypes_, self._restype_) - self._com_index = argument + 0x1000 + self._com_index = argument + 0x1000 self.name = argsl.pop(0) if argsl: self.paramflags = argsl.pop(0) @@ -281,6 +279,7 @@ except SystemExit as e: handle_system_exit(e) raise + return f def __call__(self, *args, **kwargs): @@ -317,7 +316,7 @@ except: exc_info = sys.exc_info() traceback.print_tb(exc_info[2], file=sys.stderr) - print >>sys.stderr, "%s: %s" % (exc_info[0].__name__, exc_info[1]) + print >> sys.stderr, "%s: %s" % (exc_info[0].__name__, exc_info[1]) return 0 if self._restype_ is not None: return res @@ -328,7 +327,7 @@ # really slow". Now we don't worry that much about slowness # of ctypes, and it's strange to get warnings for perfectly- # legal code. 
- #warnings.warn('C function without declared arguments called', + # warnings.warn('C function without declared arguments called', # RuntimeWarning, stacklevel=2) argtypes = [] @@ -337,7 +336,7 @@ if not args: raise ValueError( "native COM method call without 'this' parameter" - ) + ) thisvalue = args[0] thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( @@ -366,7 +365,6 @@ return tuple(outargs) def _call_funcptr(self, funcptr, *newargs): - if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO: tmp = _rawffi.get_errno() _rawffi.set_errno(get_errno()) @@ -431,7 +429,7 @@ ffiargs = [argtype.get_ffi_argtype() for argtype in argtypes] ffires = restype.get_ffi_argtype() return _ffi.FuncPtr.fromaddr(ptr, '', ffiargs, ffires, self._flags_) - + cdll = self.dll._handle try: ffi_argtypes = [argtype.get_ffi_argtype() for argtype in argtypes] @@ -450,7 +448,7 @@ # funcname -> _funcname@ # where n is 0, 4, 8, 12, ..., 128 for i in range(33): - mangled_name = "_%s@%d" % (self.name, i*4) + mangled_name = "_%s@%d" % (self.name, i * 4) try: return cdll.getfunc(mangled_name, ffi_argtypes, ffi_restype, @@ -492,7 +490,7 @@ for argtype, arg in zip(argtypes, args): param = argtype.from_param(arg) _type_ = getattr(argtype, '_type_', None) - if _type_ == 'P': # special-case for c_void_p + if _type_ == 'P': # special-case for c_void_p param = param._get_buffer_value() elif self._is_primitive(argtype): param = param.value @@ -668,69 +666,11 @@ self._needs_free = False -def make_fastpath_subclass(CFuncPtr): - if CFuncPtr._is_fastpath: - return CFuncPtr - # - try: - return make_fastpath_subclass.memo[CFuncPtr] - except KeyError: - pass - - class CFuncPtrFast(CFuncPtr): - - _is_fastpath = True - _slowpath_allowed = True # set to False by tests - - @classmethod - def enable_fastpath_maybe(cls, obj): - if (obj.callable is None and - obj._com_index is None): - obj.__class__ = cls - - def __rollback(self): - assert self._slowpath_allowed - 
self.__class__ = CFuncPtr - - # disable the fast path if we reset argtypes - def _setargtypes(self, argtypes): - self.__rollback() - self._setargtypes(argtypes) - argtypes = property(CFuncPtr._getargtypes, _setargtypes) - - def _setcallable(self, func): - self.__rollback() - self.callable = func - callable = property(lambda x: None, _setcallable) - - def _setcom_index(self, idx): - self.__rollback() - self._com_index = idx - _com_index = property(lambda x: None, _setcom_index) - - def __call__(self, *args): - thisarg = None - argtypes = self._argtypes_ - restype = self._restype_ - funcptr = self._getfuncptr(argtypes, restype, thisarg) - try: - result = self._call_funcptr(funcptr, *args) - result, _ = self._do_errcheck(result, args) - except (TypeError, ArgumentError, UnicodeDecodeError): - assert self._slowpath_allowed - return CFuncPtr.__call__(self, *args) - return result - - make_fastpath_subclass.memo[CFuncPtr] = CFuncPtrFast - return CFuncPtrFast -make_fastpath_subclass.memo = {} - - def handle_system_exit(e): # issue #1194: if we get SystemExit here, then exit the interpreter. # Highly obscure imho but some people seem to depend on it. if sys.flags.inspect: - return # Don't exit if -i flag was given. + return # Don't exit if -i flag was given. 
else: code = e.code if isinstance(code, int): diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -234,6 +234,9 @@ if ('_abstract_' in cls.__dict__ or cls is Structure or cls is union.Union): raise TypeError("abstract class") + if hasattr(cls, '_swappedbytes_'): + raise NotImplementedError("missing in PyPy: structure/union with " + "swapped (non-native) byte ordering") if hasattr(cls, '_ffistruct_'): self.__dict__['_buffer'] = self._ffistruct_(autofree=True) return self diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -8,6 +8,9 @@ from _curses_cffi import ffi, lib +version = b"2.2" +__version__ = b"2.2" + def _copy_to_globals(name): globals()[name] = getattr(lib, name) @@ -60,10 +63,6 @@ _setup() -# Do we want this? -# version = "2.2" -# __version__ = "2.2" - # ____________________________________________________________ @@ -913,101 +912,29 @@ return None -# XXX: Do something about the following? 
-# /* Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES -# * and _curses.COLS */ -# #if defined(HAVE_CURSES_RESIZETERM) || defined(HAVE_CURSES_RESIZE_TERM) -# static int -# update_lines_cols(void) -# { -# PyObject *o; -# PyObject *m = PyImport_ImportModuleNoBlock("curses"); +# Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES +# and _curses.COLS +def update_lines_cols(): + globals()["LINES"] = lib.LINES + globals()["COLS"] = lib.COLS + try: + m = sys.modules["curses"] + m.LINES = lib.LINES + m.COLS = lib.COLS + except (KeyError, AttributeError): + pass -# if (!m) -# return 0; -# o = PyInt_FromLong(LINES); -# if (!o) { -# Py_DECREF(m); -# return 0; -# } -# if (PyObject_SetAttrString(m, "LINES", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# if (PyDict_SetItemString(ModDict, "LINES", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# Py_DECREF(o); -# o = PyInt_FromLong(COLS); -# if (!o) { -# Py_DECREF(m); -# return 0; -# } -# if (PyObject_SetAttrString(m, "COLS", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# if (PyDict_SetItemString(ModDict, "COLS", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# Py_DECREF(o); -# Py_DECREF(m); -# return 1; -# } -# #endif +def resizeterm(lines, columns): + _ensure_initialised() + _check_ERR(lib.resizeterm(lines, columns), "resizeterm") + update_lines_cols() -# #ifdef HAVE_CURSES_RESIZETERM -# static PyObject * -# PyCurses_ResizeTerm(PyObject *self, PyObject *args) -# { -# int lines; -# int columns; -# PyObject *result; -# PyCursesInitialised; - -# if (!PyArg_ParseTuple(args,"ii:resizeterm", &lines, &columns)) -# return NULL; - -# result = PyCursesCheckERR(resizeterm(lines, columns), "resizeterm"); -# if (!result) -# return NULL; -# if (!update_lines_cols()) -# return NULL; -# return result; -# } - -# #endif - -# #ifdef HAVE_CURSES_RESIZE_TERM -# static PyObject * -# PyCurses_Resize_Term(PyObject *self, PyObject *args) -# { -# int 
lines; -# int columns; - -# PyObject *result; - -# PyCursesInitialised; - -# if (!PyArg_ParseTuple(args,"ii:resize_term", &lines, &columns)) -# return NULL; - -# result = PyCursesCheckERR(resize_term(lines, columns), "resize_term"); -# if (!result) -# return NULL; -# if (!update_lines_cols()) -# return NULL; -# return result; -# } -# #endif /* HAVE_CURSES_RESIZE_TERM */ +def resize_term(lines, columns): + _ensure_initialised() + _check_ERR(lib.resize_term(lines, columns), "resize_term") + update_lines_cols() def setsyx(y, x): diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py --- a/lib_pypy/_curses_build.py +++ b/lib_pypy/_curses_build.py @@ -87,6 +87,13 @@ static const chtype A_CHARTEXT; static const chtype A_COLOR; +static const chtype A_HORIZONTAL; +static const chtype A_LEFT; +static const chtype A_LOW; +static const chtype A_RIGHT; +static const chtype A_TOP; +static const chtype A_VERTICAL; + static const int BUTTON1_RELEASED; static const int BUTTON1_PRESSED; static const int BUTTON1_CLICKED; @@ -202,6 +209,8 @@ int resetty(void); int reset_prog_mode(void); int reset_shell_mode(void); +int resizeterm(int, int); +int resize_term(int, int); int savetty(void); int scroll(WINDOW *); int scrollok(WINDOW *, bool); diff --git a/lib_pypy/_pypy_winbase_build.py b/lib_pypy/_pypy_winbase_build.py --- a/lib_pypy/_pypy_winbase_build.py +++ b/lib_pypy/_pypy_winbase_build.py @@ -79,10 +79,20 @@ BOOL WINAPI CreateProcessA(char *, char *, void *, void *, BOOL, DWORD, char *, char *, LPSTARTUPINFO, LPPROCESS_INFORMATION); +BOOL WINAPI CreateProcessW(wchar_t *, wchar_t *, void *, + void *, BOOL, DWORD, wchar_t *, + wchar_t *, LPSTARTUPINFO, LPPROCESS_INFORMATION); DWORD WINAPI WaitForSingleObject(HANDLE, DWORD); BOOL WINAPI GetExitCodeProcess(HANDLE, LPDWORD); BOOL WINAPI TerminateProcess(HANDLE, UINT); HANDLE WINAPI GetStdHandle(DWORD); +DWORD WINAPI GetModuleFileNameW(HANDLE, wchar_t *, DWORD); + +UINT WINAPI SetErrorMode(UINT); +#define 
SEM_FAILCRITICALERRORS 0x0001 +#define SEM_NOGPFAULTERRORBOX 0x0002 +#define SEM_NOALIGNMENTFAULTEXCEPT 0x0004 +#define SEM_NOOPENFILEERRORBOX 0x8000 """) # -------------------- diff --git a/lib_pypy/_pypy_winbase_cffi.py b/lib_pypy/_pypy_winbase_cffi.py --- a/lib_pypy/_pypy_winbase_cffi.py +++ b/lib_pypy/_pypy_winbase_cffi.py @@ -3,8 +3,8 @@ ffi = _cffi_backend.FFI('_pypy_winbase_cffi', _version = 0x2601, - _types = b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x50\x03\x00\x00\x13\x11\x00\x00\x53\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x4F\x03\x00\x00\x4E\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x42\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x52\x03\x00\x00\x04\x01\x00\x00\x00\x01', - _globals = 
(b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x4C\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x49\x23GetStdHandle',0,b'\x00\x00\x3F\x23GetVersion',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x3B\x23WaitForSingleObject',0,b'\x00\x00\x38\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x44\x23_getwch',0,b'\x00\x00\x44\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x46\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x41\x23_ungetwch',0), - _struct_unions = ((b'\x00\x00\x00\x4E\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x4F\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x42\x11wShowWindow',b'\x00\x00\x42\x11cbReserved2',b'\x00\x00\x51\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')), - _typenames = (b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x4EPROCESS_INFORMATION',b'\x00\x00\x00\x4FSTARTUPINFO',b'\x00\x00\x00\x42wint_t'), + _types = 
b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x64\x03\x00\x00\x13\x11\x00\x00\x67\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x63\x03\x00\x00\x62\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x5B\x03\x00\x00\x39\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x39\x11\x00\x00\x39\x11\x00\x00\x1B\x11\x00\x00\x1C\x11\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x29\x0D\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x39\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x56\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x56\x0D\x00\x00\x00\x0F\x00\x00\x56\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x66\x03\x00\x00\x04\x01\x00\x00\x00\x01', + _globals = 
(b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x38\x23CreateProcessW',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x60\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x4E\x23GetModuleFileNameW',0,b'\x00\x00\x5D\x23GetStdHandle',0,b'\x00\x00\x53\x23GetVersion',0,b'\xFF\xFF\xFF\x1FSEM_FAILCRITICALERRORS',1,b'\xFF\xFF\xFF\x1FSEM_NOALIGNMENTFAULTEXCEPT',4,b'\xFF\xFF\xFF\x1FSEM_NOGPFAULTERRORBOX',2,b'\xFF\xFF\xFF\x1FSEM_NOOPENFILEERRORBOX',32768,b'\x00\x00\x47\x23SetErrorMode',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x4A\x23WaitForSingleObject',0,b'\x00\x00\x44\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x58\x23_getwch',0,b'\x00\x00\x58\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x5A\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x55\x23_ungetwch',0), + _struct_unions = ((b'\x00\x00\x00\x62\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x63\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x56\x11wShowWindow',b'\x00\x00\x56\x11cbReserved2',b'\x00\x00\x65\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')), + _typenames = 
(b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x62PROCESS_INFORMATION',b'\x00\x00\x00\x63STARTUPINFO',b'\x00\x00\x00\x56wint_t'), ) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -31,10 +31,11 @@ import weakref from threading import _get_ident as _thread_get_ident try: - from __pypy__ import newlist_hint + from __pypy__ import newlist_hint, add_memory_pressure except ImportError: assert '__pypy__' not in sys.builtin_module_names newlist_hint = lambda sizehint: [] + add_memory_pressure = lambda size: None if sys.version_info[0] >= 3: StandardError = Exception @@ -150,6 +151,9 @@ def connect(database, timeout=5.0, detect_types=0, isolation_level="", check_same_thread=True, factory=None, cached_statements=100): factory = Connection if not factory else factory + # an sqlite3 db seems to be around 100 KiB at least (doesn't matter if + # backed by :memory: or a file) + add_memory_pressure(100 * 1024) return factory(database, timeout, detect_types, isolation_level, check_same_thread, factory, cached_statements) diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -431,7 +431,14 @@ self.append(obj) def find_class(self, module, name): - # Subclasses may override this + if self.find_global is None: + raise UnpicklingError( + "Global and instance pickles are not supported.") + return self.find_global(module, name) + + def find_global(self, module, name): + # This can officially be patched directly in the Unpickler + # instance, according to the docs __import__(module) mod = sys.modules[module] klass = getattr(mod, name) diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.10.0 +Version: 1.11.0 Summary: Foreign Function Interface for Python calling C 
code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI from .error import CDefError, FFIError, VerificationError, VerificationMissing -__version__ = "1.10.0" -__version_info__ = (1, 10, 0) +__version__ = "1.11.0" +__version_info__ = (1, 11, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_errors.h b/lib_pypy/cffi/_cffi_errors.h new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/_cffi_errors.h @@ -0,0 +1,145 @@ +#ifndef CFFI_MESSAGEBOX +# ifdef _MSC_VER +# define CFFI_MESSAGEBOX 1 +# else +# define CFFI_MESSAGEBOX 0 +# endif +#endif + + +#if CFFI_MESSAGEBOX +/* Windows only: logic to take the Python-CFFI embedding logic + initialization errors and display them in a background thread + with MessageBox. The idea is that if the whole program closes + as a result of this problem, then likely it is already a console + program and you can read the stderr output in the console too. + If it is not a console program, then it will likely show its own + dialog to complain, or generally not abruptly close, and for this + case the background thread should stay alive. 
+*/ +static void *volatile _cffi_bootstrap_text; + +static PyObject *_cffi_start_error_capture(void) +{ + PyObject *result = NULL; + PyObject *x, *m, *bi; + + if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text, + (void *)1, NULL) != NULL) + return (PyObject *)1; + + m = PyImport_AddModule("_cffi_error_capture"); + if (m == NULL) + goto error; + + result = PyModule_GetDict(m); + if (result == NULL) + goto error; + +#if PY_MAJOR_VERSION >= 3 + bi = PyImport_ImportModule("builtins"); +#else + bi = PyImport_ImportModule("__builtin__"); +#endif + if (bi == NULL) + goto error; + PyDict_SetItemString(result, "__builtins__", bi); + Py_DECREF(bi); + + x = PyRun_String( + "import sys\n" + "class FileLike:\n" + " def write(self, x):\n" + " of.write(x)\n" + " self.buf += x\n" + "fl = FileLike()\n" + "fl.buf = ''\n" + "of = sys.stderr\n" + "sys.stderr = fl\n" + "def done():\n" + " sys.stderr = of\n" + " return fl.buf\n", /* make sure the returned value stays alive */ + Py_file_input, + result, result); + Py_XDECREF(x); + + error: + if (PyErr_Occurred()) + { + PyErr_WriteUnraisable(Py_None); + PyErr_Clear(); + } + return result; +} + +#pragma comment(lib, "user32.lib") + +static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored) +{ + Sleep(666); /* may be interrupted if the whole process is closing */ +#if PY_MAJOR_VERSION >= 3 + MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text, + L"Python-CFFI error", + MB_OK | MB_ICONERROR); +#else + MessageBoxA(NULL, (char *)_cffi_bootstrap_text, + "Python-CFFI error", + MB_OK | MB_ICONERROR); +#endif + _cffi_bootstrap_text = NULL; + return 0; +} + +static void _cffi_stop_error_capture(PyObject *ecap) +{ + PyObject *s; + void *text; + + if (ecap == (PyObject *)1) + return; + + if (ecap == NULL) + goto error; + + s = PyRun_String("done()", Py_eval_input, ecap, ecap); + if (s == NULL) + goto error; + + /* Show a dialog box, but in a background thread, and + never show multiple dialog boxes at once. 
*/ +#if PY_MAJOR_VERSION >= 3 + text = PyUnicode_AsWideCharString(s, NULL); +#else + text = PyString_AsString(s); +#endif + + _cffi_bootstrap_text = text; + + if (text != NULL) + { + HANDLE h; + h = CreateThread(NULL, 0, _cffi_bootstrap_dialog, + NULL, 0, NULL); + if (h != NULL) + CloseHandle(h); + } + /* decref the string, but it should stay alive as 'fl.buf' + in the small module above. It will really be freed only if + we later get another similar error. So it's a leak of at + most one copy of the small module. That's fine for this + situation which is usually a "fatal error" anyway. */ + Py_DECREF(s); + PyErr_Clear(); + return; + + error: + _cffi_bootstrap_text = NULL; + PyErr_Clear(); +} + +#else + +static PyObject *_cffi_start_error_capture(void) { return NULL; } +static void _cffi_stop_error_capture(PyObject *ecap) { } + +#endif diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -8,7 +8,7 @@ the same works for the other two macros. Py_DEBUG implies them, but not the other way around. 
*/ -#ifndef _CFFI_USE_EMBEDDING +#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API) # include # if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) # define Py_LIMITED_API @@ -159,9 +159,9 @@ #define _cffi_from_c_struct \ ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18]) #define _cffi_to_c_wchar_t \ - ((wchar_t(*)(PyObject *))_cffi_exports[19]) + ((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19]) #define _cffi_from_c_wchar_t \ - ((PyObject *(*)(wchar_t))_cffi_exports[20]) + ((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20]) #define _cffi_to_c_long_double \ ((long double(*)(PyObject *))_cffi_exports[21]) #define _cffi_to_c__Bool \ @@ -174,7 +174,11 @@ #define _CFFI_CPIDX 25 #define _cffi_call_python \ ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) -#define _CFFI_NUM_EXPORTS 26 +#define _cffi_to_c_wchar3216_t \ + ((int(*)(PyObject *))_cffi_exports[26]) +#define _cffi_from_c_wchar3216_t \ + ((PyObject *(*)(int))_cffi_exports[27]) +#define _CFFI_NUM_EXPORTS 28 struct _cffi_ctypedescr; @@ -215,6 +219,46 @@ return NULL; } + +#ifdef HAVE_WCHAR_H +typedef wchar_t _cffi_wchar_t; +#else +typedef uint16_t _cffi_wchar_t; /* same random pick as _cffi_backend.c */ +#endif + +_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 2) + return (uint16_t)_cffi_to_c_wchar_t(o); + else + return (uint16_t)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x) +{ + if (sizeof(_cffi_wchar_t) == 2) + return _cffi_from_c_wchar_t(x); + else + return _cffi_from_c_wchar3216_t(x); +} + +_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 4) + return (int)_cffi_to_c_wchar_t(o); + else + return (int)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(int x) +{ + if (sizeof(_cffi_wchar_t) == 4) + return _cffi_from_c_wchar_t(x); + else + return 
_cffi_from_c_wchar3216_t(x); +} + + /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -109,6 +109,8 @@ /********** CPython-specific section **********/ #ifndef PYPY_VERSION +#include "_cffi_errors.h" + #define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] @@ -220,8 +222,16 @@ /* Print as much information as potentially useful. Debugging load-time failures with embedding is not fun */ + PyObject *ecap; PyObject *exception, *v, *tb, *f, *modules, *mod; PyErr_Fetch(&exception, &v, &tb); + ecap = _cffi_start_error_capture(); + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString( + "Failed to initialize the Python-CFFI embedding logic:\n\n", f); + } + if (exception != NULL) { PyErr_NormalizeException(&exception, &v, &tb); PyErr_Display(exception, v, tb); @@ -230,10 +240,9 @@ Py_XDECREF(v); Py_XDECREF(tb); - f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.10.0" + "\ncompiled with cffi version: 1.11.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); @@ -249,6 +258,7 @@ PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); PyFile_WriteString("\n\n", f); } + _cffi_stop_error_capture(ecap); } result = -1; goto done; diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -75,9 +75,10 @@ self._init_once_cache = {} self._cdef_version = None self._embedding = None + self._typecache = model.get_typecache(backend) if hasattr(backend, 'set_ffi'): backend.set_ffi(self) - for name in backend.__dict__: + for name in list(backend.__dict__): if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # @@ -764,7 +765,7 @@ if 
sys.platform != "win32": return backend.load_library(None, flags) name = "c" # Windows: load_library(None) fails, but this works - # (backward compatibility hack only) + # on Python 2 (backward compatibility hack only) first_error = None if '.' in name or '/' in name or os.sep in name: try: @@ -774,6 +775,9 @@ import ctypes.util path = ctypes.util.find_library(name) if path is None: + if name == "c" and sys.platform == "win32" and sys.version_info >= (3,): + raise OSError("dlopen(None) cannot work on Windows for Python 3 " + "(see http://bugs.python.org/issue23606)") msg = ("ctypes.util.find_library() did not manage " "to locate a library called %r" % (name,)) if first_error is not None: diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py --- a/lib_pypy/cffi/cffi_opcode.py +++ b/lib_pypy/cffi/cffi_opcode.py @@ -105,8 +105,12 @@ PRIM_UINT_FAST64 = 45 PRIM_INTMAX = 46 PRIM_UINTMAX = 47 +PRIM_FLOATCOMPLEX = 48 +PRIM_DOUBLECOMPLEX = 49 +PRIM_CHAR16 = 50 +PRIM_CHAR32 = 51 -_NUM_PRIM = 48 +_NUM_PRIM = 52 _UNKNOWN_PRIM = -1 _UNKNOWN_FLOAT_PRIM = -2 _UNKNOWN_LONG_DOUBLE = -3 @@ -128,8 +132,12 @@ 'float': PRIM_FLOAT, 'double': PRIM_DOUBLE, 'long double': PRIM_LONGDOUBLE, + 'float _Complex': PRIM_FLOATCOMPLEX, + 'double _Complex': PRIM_DOUBLECOMPLEX, '_Bool': PRIM_BOOL, 'wchar_t': PRIM_WCHAR, + 'char16_t': PRIM_CHAR16, + 'char32_t': PRIM_CHAR32, 'int8_t': PRIM_INT8, 'uint8_t': PRIM_UINT8, 'int16_t': PRIM_INT16, diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -16,6 +16,7 @@ except ImportError: lock = None +CDEF_SOURCE_STRING = "" _r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", re.DOTALL | re.MULTILINE) _r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)" @@ -258,15 +259,21 @@ ctn.discard(name) typenames += sorted(ctn) # - csourcelines = ['typedef int %s;' % typename for typename in typenames] + csourcelines = [] + csourcelines.append('# 1 ""') + for 
typename in typenames: + csourcelines.append('typedef int %s;' % typename) csourcelines.append('typedef int __dotdotdotint__, __dotdotdotfloat__,' ' __dotdotdot__;') + # this forces pycparser to consider the following in the file + # called from line 1 + csourcelines.append('# 1 "%s"' % (CDEF_SOURCE_STRING,)) csourcelines.append(csource) - csource = '\n'.join(csourcelines) + fullcsource = '\n'.join(csourcelines) if lock is not None: lock.acquire() # pycparser is not thread-safe... try: - ast = _get_parser().parse(csource) + ast = _get_parser().parse(fullcsource) except pycparser.c_parser.ParseError as e: self.convert_pycparser_error(e, csource) finally: @@ -276,17 +283,17 @@ return ast, macros, csource def _convert_pycparser_error(self, e, csource): - # xxx look for ":NUM:" at the start of str(e) and try to interpret - # it as a line number + # xxx look for ":NUM:" at the start of str(e) + # and interpret that as a line number. This will not work if + # the user gives explicit ``# NUM "FILE"`` directives. 
line = None msg = str(e) - if msg.startswith(':') and ':' in msg[1:]: - linenum = msg[1:msg.find(':',1)] - if linenum.isdigit(): - linenum = int(linenum, 10) - csourcelines = csource.splitlines() - if 1 <= linenum <= len(csourcelines): - line = csourcelines[linenum-1] + match = re.match(r"%s:(\d+):" % (CDEF_SOURCE_STRING,), msg) + if match: + linenum = int(match.group(1), 10) + csourcelines = csource.splitlines() + if 1 <= linenum <= len(csourcelines): + line = csourcelines[linenum-1] return line def convert_pycparser_error(self, e, csource): @@ -321,10 +328,12 @@ break else: assert 0 + current_decl = None # try: self._inside_extern_python = '__cffi_extern_python_stop' for decl in iterator: + current_decl = decl if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) elif isinstance(decl, pycparser.c_ast.Typedef): @@ -348,7 +357,13 @@ elif decl.__class__.__name__ == 'Pragma': pass # skip pragma, only in pycparser 2.15 else: - raise CDefError("unrecognized construct", decl) + raise CDefError("unexpected <%s>: this construct is valid " + "C but not valid in cdef()" % + decl.__class__.__name__, decl) + except CDefError as e: + if len(e.args) == 1: + e.args = e.args + (current_decl,) + raise except FFIError as e: msg = self._convert_pycparser_error(e, csource) if msg: diff --git a/lib_pypy/cffi/error.py b/lib_pypy/cffi/error.py --- a/lib_pypy/cffi/error.py +++ b/lib_pypy/cffi/error.py @@ -5,10 +5,13 @@ class CDefError(Exception): def __str__(self): try: - line = 'line %d: ' % (self.args[1].coord.line,) + current_decl = self.args[1] + filename = current_decl.coord.file + linenum = current_decl.coord.line + prefix = '%s:%d: ' % (filename, linenum) except (AttributeError, TypeError, IndexError): - line = '' - return '%s%s' % (line, self.args[0]) + prefix = '' + return '%s%s' % (prefix, self.args[0]) class VerificationError(Exception): """ An error raised when verification fails diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- 
a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -6,6 +6,7 @@ 'extra_objects', 'depends'] def get_extension(srcfilename, modname, sources=(), **kwds): + _hack_at_distutils() from distutils.core import Extension allsources = [srcfilename] for src in sources: @@ -15,6 +16,7 @@ def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" + _hack_at_distutils() saved_environ = os.environ.copy() try: outputfilename = _build(tmpdir, ext, compiler_verbose, debug) @@ -113,3 +115,13 @@ f = cStringIO.StringIO() _flatten(x, f) return f.getvalue() + +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) + if sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -95,7 +95,8 @@ class BasePrimitiveType(BaseType): - pass + def is_complex_type(self): + return False class PrimitiveType(BasePrimitiveType): @@ -116,9 +117,13 @@ 'float': 'f', 'double': 'f', 'long double': 'f', + 'float _Complex': 'j', + 'double _Complex': 'j', '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', + 'char16_t': 'c', + 'char32_t': 'c', 'int8_t': 'i', 'uint8_t': 'i', 'int16_t': 'i', @@ -163,6 +168,8 @@ return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' + def is_complex_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'j' def build_backend_type(self, ffi, finishlist): return global_cache(self, ffi, 'new_primitive_type', self.name) @@ -561,22 +568,26 @@ global_lock = allocate_lock() +_typecache_cffi_backend = weakref.WeakValueDictionary() + +def get_typecache(backend): + # returns _typecache_cffi_backend if backend 
is the _cffi_backend + # module, or type(backend).__typecache if backend is an instance of + # CTypesBackend (or some FakeBackend class during tests) + if isinstance(backend, types.ModuleType): + return _typecache_cffi_backend + with global_lock: + if not hasattr(type(backend), '__typecache'): + type(backend).__typecache = weakref.WeakValueDictionary() + return type(backend).__typecache def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds try: - return ffi._backend.__typecache[key] + return ffi._typecache[key] except KeyError: pass - except AttributeError: - # initialize the __typecache attribute, either at the module level - # if ffi._backend is a module, or at the class level if ffi._backend - # is some instance. - if isinstance(ffi._backend, types.ModuleType): - ffi._backend.__typecache = weakref.WeakValueDictionary() - else: - type(ffi._backend).__typecache = weakref.WeakValueDictionary() try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: @@ -584,7 +595,7 @@ # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves - cache = ffi._backend.__typecache + cache = ffi._typecache with global_lock: res1 = cache.get(key) if res1 is None: diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -79,8 +79,12 @@ #define _CFFI_PRIM_UINT_FAST64 45 #define _CFFI_PRIM_INTMAX 46 #define _CFFI_PRIM_UINTMAX 47 +#define _CFFI_PRIM_FLOATCOMPLEX 48 +#define _CFFI_PRIM_DOUBLECOMPLEX 49 +#define _CFFI_PRIM_CHAR16 50 +#define _CFFI_PRIM_CHAR32 51 -#define _CFFI__NUM_PRIM 48 +#define _CFFI__NUM_PRIM 52 #define _CFFI__UNKNOWN_PRIM (-1) #define _CFFI__UNKNOWN_FLOAT_PRIM (-2) #define _CFFI__UNKNOWN_LONG_DOUBLE (-3) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- 
a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -3,8 +3,9 @@ from .error import VerificationError from .cffi_opcode import * -VERSION = "0x2601" -VERSION_EMBEDDED = "0x2701" +VERSION_BASE = 0x2601 +VERSION_EMBEDDED = 0x2701 +VERSION_CHAR16CHAR32 = 0x2801 class GlobalExpr: @@ -126,6 +127,10 @@ self.ffi = ffi self.module_name = module_name self.target_is_python = target_is_python + self._version = VERSION_BASE + + def needs_version(self, ver): + self._version = max(self._version, ver) def collect_type_table(self): self._typesdict = {} @@ -303,10 +308,10 @@ base_module_name,)) prnt('#endif') lines = self._rel_readlines('_embedding.h') + i = lines.index('#include "_cffi_errors.h"\n') + lines[i:i+1] = self._rel_readlines('_cffi_errors.h') prnt(''.join(lines)) - version = VERSION_EMBEDDED - else: - version = VERSION + self.needs_version(VERSION_EMBEDDED) # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') @@ -405,7 +410,7 @@ prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % version) + prnt(' p[0] = (const void *)0x%x;' % self._version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -423,21 +428,22 @@ prnt('PyMODINIT_FUNC') prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') - prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, version)) + prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') - prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, version)) + prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) prnt('}') prnt('#endif') prnt() prnt('#ifdef __GNUC__') prnt('# pragma GCC visibility 
pop') prnt('#endif') + self._version = None def _to_py(self, x): if isinstance(x, str): @@ -476,7 +482,8 @@ prnt('from %s import ffi as _ffi%d' % (included_module_name, i)) prnt() prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,)) - prnt(" _version = %s," % (VERSION,)) + prnt(" _version = 0x%x," % (self._version,)) + self._version = None # # the '_types' keyword argument self.cffi_types = tuple(self.cffi_types) # don't change any more @@ -506,7 +513,7 @@ def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): extraarg = '' - if isinstance(tp, model.BasePrimitiveType): + if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type(): if tp.is_integer_type() and tp.name != '_Bool': converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name @@ -515,8 +522,11 @@ # double' here, and _cffi_to_c_double would loose precision converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),) else: - converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + cname = tp.get_c_name('') + converter = '(%s)_cffi_to_c_%s' % (cname, tp.name.replace(' ', '_')) + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) errvalue = '-1' # elif isinstance(tp, model.PointerType): @@ -524,8 +534,10 @@ tovar, errcode) return # - elif isinstance(tp, model.StructOrUnionOrEnum): - # a struct (not a struct pointer) as a function argument + elif (isinstance(tp, model.StructOrUnionOrEnum) or + isinstance(tp, model.BasePrimitiveType)): + # a struct (not a struct pointer) as a function argument; + # or, a complex (the same code works) self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' % (tovar, self._gettypenum(tp), fromvar)) self._prnt(' %s;' % errcode) @@ -570,8 +582,11 @@ return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif isinstance(tp, model.UnknownFloatType): return '_cffi_from_c_double(%s)' % (var,) - elif tp.name != 'long double': - return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) + elif tp.name != 'long double' and not 
tp.is_complex_type(): + cname = tp.name.replace(' ', '_') + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) + return '_cffi_from_c_%s(%s)' % (cname, var) else: return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) @@ -734,21 +749,26 @@ # # the PyPy version: need to replace struct/union arguments with # pointers, and if the result is a struct/union, insert a first - # arg that is a pointer to the result. + # arg that is a pointer to the result. We also do that for + # complex args and return type. + def need_indirection(type): + return (isinstance(type, model.StructOrUnion) or + (isinstance(type, model.PrimitiveType) and + type.is_complex_type())) difference = False arguments = [] call_arguments = [] context = 'argument of %s' % name for i, type in enumerate(tp.args): indirection = '' - if isinstance(type, model.StructOrUnion): + if need_indirection(type): indirection = '*' difference = True arg = type.get_c_name(' %sx%d' % (indirection, i), context) arguments.append(arg) call_arguments.append('%sx%d' % (indirection, i)) tp_result = tp.result - if isinstance(tp_result, model.StructOrUnion): + if need_indirection(tp_result): context = 'result of %s' % name arg = tp_result.get_c_name(' *result', context) arguments.insert(0, arg) @@ -1180,7 +1200,7 @@ size_of_result = '(int)sizeof(%s)' % ( tp.result.get_c_name('', context),) prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) - prnt(' { "%s", %s };' % (name, size_of_result)) + prnt(' { "%s.%s", %s };' % (self.module_name, name, size_of_result)) prnt() # arguments = [] @@ -1479,6 +1499,12 @@ _patch_for_embedding(patchlist) if target != '*': _patch_for_target(patchlist, target) + if compiler_verbose: + if tmpdir == '.': + msg = 'the current directory is' + else: + msg = 'setting the current directory to' + print('%s %r' % (msg, os.path.abspath(tmpdir))) os.chdir(tmpdir) outputfilename = ffiplatform.compile('.', ext, compiler_verbose, debug) 
diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -808,7 +808,8 @@ #include /* this block of #ifs should be kept exactly identical between - c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ #if defined(_MSC_VER) # include /* for alloca() */ # if _MSC_VER < 1600 /* MSVC < 2010 */ @@ -842,11 +843,13 @@ # include # endif # if _MSC_VER < 1800 /* MSVC < 2013 */ - typedef unsigned char _Bool; +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif # endif #else # include -# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) # include # endif #endif diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -627,7 +627,8 @@ #include /* XXX for ssize_t on some platforms */ /* this block of #ifs should be kept exactly identical between - c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ #if defined(_MSC_VER) # include /* for alloca() */ # if _MSC_VER < 1600 /* MSVC < 2010 */ @@ -661,11 +662,13 @@ # include # endif # if _MSC_VER < 1800 /* MSVC < 2013 */ - typedef unsigned char _Bool; +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif # endif #else # include -# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) # include # endif #endif diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -26,16 +26,6 @@ s = s.encode('ascii') super(NativeIO, self).write(s) -def _hack_at_distutils(): - # Windows-only workaround for some 
configurations: see - # https://bugs.python.org/issue23246 (Python 2.7 with - # a specific MS compiler suite download) - if sys.platform == "win32": - try: - import setuptools # for side-effects, patches distutils - except ImportError: - pass - class Verifier(object): @@ -126,7 +116,7 @@ return basename def get_extension(self): - _hack_at_distutils() # backward compatibility hack + ffiplatform._hack_at_distutils() # backward compatibility hack if not self._has_source: with self.ffi._lock: if not self._has_source: diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -268,12 +268,22 @@ assert abs(d) == 1 source = getcurrent() source.tempval = arg - if d > 0: - cando = self.balance < 0 - dir = d - else: - cando = self.balance > 0 - dir = 0 + while True: + if d > 0: + cando = self.balance < 0 + dir = d + else: + cando = self.balance > 0 + dir = 0 + + if cando and self.queue[0]._tasklet_killed: + # issue #2595: the tasklet was killed while waiting. + # drop that tasklet from consideration and try again. + self.balance += d + self.queue.popleft() + else: + # normal path + break if _channel_callback is not None: _channel_callback(self, source, dir, not cando) @@ -348,6 +358,8 @@ module. """ tempval = None + _tasklet_killed = False + def __new__(cls, func=None, label=''): res = coroutine.__new__(cls) res.label = label @@ -395,6 +407,7 @@ If the exception passes the toplevel frame of the tasklet, the tasklet will silently die. """ + self._tasklet_killed = True if not self.is_zombie: # Killing the tasklet by throwing TaskletExit exception. 
coroutine.kill(self) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -42,8 +42,9 @@ from rpython.jit.backend import detect_cpu try: if detect_cpu.autodetect().startswith('x86'): - working_modules.add('_vmprof') - working_modules.add('faulthandler') + if not sys.platform.startswith('openbsd'): + working_modules.add('_vmprof') + working_modules.add('faulthandler') except detect_cpu.ProcessorAutodetectError: pass @@ -219,9 +220,6 @@ BoolOption("withsmalllong", "use a version of 'long' in a C long long", default=False), - BoolOption("withstrbuf", "use strings optimized for addition (ver 2)", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -79,6 +79,9 @@ _ssl libssl +_vmprof + libunwind (optional, loaded dynamically at runtime) + Make sure to have these libraries (with development headers) installed before building PyPy, otherwise the resulting binary will not contain these modules. Furthermore, the following libraries should be present @@ -90,7 +93,8 @@ libsqlite3 curses - libncurses + libncurses-dev (for PyPy2) + libncursesw-dev (for PyPy3) gdbm libgdbm-dev @@ -103,12 +107,13 @@ To run untranslated tests, you need the Boehm garbage collector libgc. -On Debian, this is the command to install all build-time dependencies:: +On Debian and Ubuntu, this is the command to install all build-time +dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ tk-dev libgc-dev python-cffi \ - liblzma-dev # For lzma on PyPy3. 
+ liblzma-dev libncursesw-dev # these two only needed on PyPy3 On Fedora:: @@ -185,13 +190,36 @@ :: cd pypy/tool/release - ./package.py pypy-VER-PLATFORM + ./package.py --archive-name=pypy-VER-PLATFORM This creates a clean and prepared hierarchy, as well as a ``.tar.bz2`` with the same content; both are found by default in ``/tmp/usession-YOURNAME/build/``. You can then either move the file hierarchy or unpack the ``.tar.bz2`` at the correct place. +It is recommended to use package.py because custom scripts will +invariably become out-of-date. If you want to write custom scripts +anyway, note an easy-to-miss point: some modules are written with CFFI, +and require some compilation. If you install PyPy as root without +pre-compiling them, normal users will get errors: + +* PyPy 2.5.1 or earlier: normal users would see permission errors. + Installers need to run ``pypy -c "import gdbm"`` and other similar + commands at install time; the exact list is in `package.py`_. Users + seeing a broken installation of PyPy can fix it after-the-fact if they + have sudo rights, by running once e.g. ``sudo pypy -c "import gdbm``. + +* PyPy 2.6 and later: anyone would get ``ImportError: no module named + _gdbm_cffi``. Installers need to run ``pypy _gdbm_build.py`` in the + ``lib_pypy`` directory during the installation process (plus others; + see the exact list in `package.py`_). Users seeing a broken + installation of PyPy can fix it after-the-fact, by running ``pypy + /path/to/lib_pypy/_gdbm_build.py``. This command produces a file + called ``_gdbm_cffi.pypy-41.so`` locally, which is a C extension + module for PyPy. You can move it at any place where modules are + normally found: e.g. in your project's main directory, or in a + directory that you add to the env var ``PYTHONPATH``. + Installation ------------ diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -59,16 +59,16 @@ # General information about the project. 
project = u'PyPy' -copyright = u'2016, The PyPy Project' +copyright = u'2017, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '5.4' +version = '5.8' # The full version, including alpha/beta/rc tags. -release = '5.4.0' +release = '5.8.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,11 +6,11 @@ Armin Rigo Maciej Fijalkowski - Carl Friedrich Bolz + Carl Friedrich Bolz-Tereick Amaury Forgeot d'Arc Antonio Cuni + Matti Picus Samuele Pedroni - Matti Picus Ronan Lamy Alex Gaynor Philip Jenvey @@ -68,28 +68,28 @@ Vincent Legoll Michael Foord Stephan Diehl + Stefano Rivera Stefan Schwarzer Tomek Meka Valentino Volonghi - Stefano Rivera Patrick Maupin Devin Jeanpierre From pypy.commits at gmail.com Sun Jul 2 10:13:20 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 02 Jul 2017 07:13:20 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Issue #2598 Message-ID: <5958ff80.925b1c0a.c8018.8612@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91665:b86de9385d47 Date: 2017-07-02 16:12 +0200 http://bitbucket.org/pypy/pypy/changeset/b86de9385d47/ Log: Issue #2598 Try to interpret a byte string for '%s' like a utf-8 string. But don't crash if it is not valid UTF-8; instead use the "replace" error handler. 
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -10,6 +10,7 @@ from rpython.rlib.objectmodel import dont_inline from rpython.rlib import rstack, rstackovf from rpython.rlib import rwin32 +from rpython.rlib import runicode from pypy.interpreter import debug @@ -468,6 +469,14 @@ assert len(formats) > 0, "unsupported: no % command found" return tuple(parts), tuple(formats) +def _decode_utf8(string): + # when building the error message, don't crash if the byte string + # provided is not valid UTF-8 + assert isinstance(string, str) + result, consumed = runicode.str_decode_utf_8( + string, len(string), "replace", final=True) + return result + def get_operrcls2(valuefmt): valuefmt = valuefmt.decode('ascii') strings, formats = decompose_valuefmt(valuefmt) @@ -499,13 +508,16 @@ elif fmt == 'S': result = space.unicode_w(space.str(value)) elif fmt == 'T': - result = space.type(value).name.decode('utf-8') + result = _decode_utf8(space.type(value).name) elif fmt == 'N': result = value.getname(space) elif fmt == '8': - result = value.decode('utf-8') + result = _decode_utf8(value) else: - result = unicode(value) + if isinstance(value, unicode): + result = value + else: + result = _decode_utf8(str(value)) lst[i + i + 1] = result lst[-1] = self.xstrings[-1] return u''.join(lst) diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -92,6 +92,20 @@ operr = oefmt("w_type", "abc %8", arg) val = operr._compute_value(space) assert val == u"abc àèìòù" + # + # if the arg is a byte string and we specify '%s', then we + # also get utf-8 encoding. This should be the common case + # nowadays with utf-8 byte strings being common in the RPython + # sources of PyPy. 
+ operr = oefmt("w_type", "abc %s", arg) + val = operr._compute_value(space) + assert val == u"abc àèìòù" + # + # if the byte string is not valid utf-8, then don't crash + arg = '\xe9' + operr = oefmt("w_type", "abc %8", arg) + val = operr._compute_value(space) + def test_errorstr(space): operr = OperationError(space.w_ValueError, space.wrap("message")) From pypy.commits at gmail.com Sun Jul 2 10:13:22 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 02 Jul 2017 07:13:22 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Fix encoding Message-ID: <5958ff82.6c8fdf0a.e736c.4bc1@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91666:bdd61a245eeb Date: 2017-07-02 16:12 +0200 http://bitbucket.org/pypy/pypy/changeset/bdd61a245eeb/ Log: Fix encoding diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -106,7 +106,7 @@ "%s: %d" % (msg, widen(uid)))) return make_struct_passwd(space, pw) - at unwrap_spec(name='text') + at unwrap_spec(name='fsencode') def getpwnam(space, name): """ getpwnam(name) -> (pw_name,pw_passwd,pw_uid, From pypy.commits at gmail.com Sun Jul 2 10:25:22 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 02 Jul 2017 07:25:22 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Update the macros Py_DECREF and similar to use the CPython 3.5 version. Message-ID: <59590252.c7871c0a.9351c.743d@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91667:6ff399c0c8bd Date: 2017-07-02 16:23 +0200 http://bitbucket.org/pypy/pypy/changeset/6ff399c0c8bd/ Log: Update the macros Py_DECREF and similar to use the CPython 3.5 version. 
diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -37,24 +37,51 @@ #define Py_INCREF(ob) (((PyObject *)(ob))->ob_refcnt++) #define Py_DECREF(op) \ do { \ - if (--((PyObject *)(op))->ob_refcnt != 0) \ + PyObject *_py_decref_tmp = (PyObject *)(op); \ + if (--(_py_decref_tmp)->ob_refcnt != 0) \ ; \ else \ - _Py_Dealloc((PyObject *)(op)); \ + _Py_Dealloc(_py_decref_tmp); \ } while (0) -#define Py_XINCREF(op) do { if ((op) == NULL) ; else Py_INCREF(op); } while (0) -#define Py_XDECREF(op) do { if ((op) == NULL) ; else Py_DECREF(op); } while (0) +#define Py_XINCREF(op) \ + do { \ + PyObject *_py_xincref_tmp = (PyObject *)(op); \ + if (_py_xincref_tmp != NULL) \ + Py_INCREF(_py_xincref_tmp); \ + } while (0) + +#define Py_XDECREF(op) \ + do { \ + PyObject *_py_xdecref_tmp = (PyObject *)(op); \ + if (_py_xdecref_tmp != NULL) \ + Py_DECREF(_py_xdecref_tmp); \ + } while (0) + #endif -#define Py_CLEAR(op) \ - do { \ - if (op) { \ - PyObject *_py_tmp = (PyObject *)(op); \ - (op) = NULL; \ - Py_DECREF(_py_tmp); \ - } \ - } while (0) +#define Py_CLEAR(op) \ + do { \ + PyObject *_py_tmp = (PyObject *)(op); \ + if (_py_tmp != NULL) { \ + (op) = NULL; \ + Py_DECREF(_py_tmp); \ + } \ + } while (0) + +#define Py_SETREF(op, op2) \ + do { \ + PyObject *_py_tmp = (PyObject *)(op); \ + (op) = (op2); \ + Py_DECREF(_py_tmp); \ + } while (0) + +#define Py_XSETREF(op, op2) \ + do { \ + PyObject *_py_tmp = (PyObject *)(op); \ + (op) = (op2); \ + Py_XDECREF(_py_tmp); \ + } while (0) #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) From pypy.commits at gmail.com Sun Jul 2 10:33:50 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 02 Jul 2017 07:33:50 -0700 (PDT) Subject: [pypy-commit] pypy default: Remove comment now that the py3.5 fix has been made Message-ID: <5959044e.0e831c0a.dc21.14b1@mx.google.com> Author: 
Armin Rigo Branch: Changeset: r91668:c3f8d403ae2f Date: 2017-07-02 16:32 +0200 http://bitbucket.org/pypy/pypy/changeset/c3f8d403ae2f/ Log: Remove comment now that the py3.5 fix has been made diff --git a/pypy/module/_cffi_backend/errorbox.py b/pypy/module/_cffi_backend/errorbox.py --- a/pypy/module/_cffi_backend/errorbox.py +++ b/pypy/module/_cffi_backend/errorbox.py @@ -86,8 +86,6 @@ return w_text = self.space.call_function(w_done) - # XXX Python 3: MessageBoxA() => MessageBoxW() - p = rffi.str2charp(self.space.bytes_w(w_text), track_allocation=False) if self.text_p: From pypy.commits at gmail.com Mon Jul 3 13:36:16 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 03 Jul 2017 10:36:16 -0700 (PDT) Subject: [pypy-commit] pypy default: Define PyMemoryViewObject from C code Message-ID: <595a8090.08891c0a.c2f48.3322@mx.google.com> Author: Ronan Lamy Branch: Changeset: r91669:bb398c8ed6e0 Date: 2017-07-03 18:35 +0100 http://bitbucket.org/pypy/pypy/changeset/bb398c8ed6e0/ Log: Define PyMemoryViewObject from C code diff --git a/pypy/module/cpyext/include/memoryobject.h b/pypy/module/cpyext/include/memoryobject.h --- a/pypy/module/cpyext/include/memoryobject.h +++ b/pypy/module/cpyext/include/memoryobject.h @@ -5,14 +5,7 @@ extern "C" { #endif -/* The struct is declared here but it shouldn't - be considered public. Don't access those fields directly, - use the functions instead! */ -typedef struct { - PyObject_HEAD - Py_buffer view; -} PyMemoryViewObject; - +#include "cpyext_memoryobject.h" /* Get a pointer to the memoryview's private copy of the exporter's buffer. 
*/ #define PyMemoryView_GET_BUFFER(op) (&((PyMemoryViewObject *)(op))->view) diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,11 +1,8 @@ -from rpython.rlib.objectmodel import keepalive_until_here -from pypy.interpreter.error import oefmt from pypy.module.cpyext.api import ( - cpython_api, Py_buffer, CANNOT_FAIL, Py_MAX_FMT, Py_MAX_NDIMS, - build_type_checkers, Py_ssize_tP, PyObjectFields, cpython_struct, - bootstrap_function, Py_bufferP, slot_function, generic_cpy_call) + cpython_api, CANNOT_FAIL, Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, + Py_ssize_tP, cts, parse_dir, bootstrap_function, Py_bufferP, slot_function) from pypy.module.cpyext.pyobject import ( - PyObject, make_ref, as_pyobj, decref, from_ref, make_typedescr, + PyObject, make_ref, decref, from_ref, make_typedescr, get_typedescr, track_reference) from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import widen @@ -13,17 +10,12 @@ from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.import_ import PyImport_Import +cts.parse_header(parse_dir / 'cpyext_memoryobject.h') +PyMemoryViewObject = cts.gettype('PyMemoryViewObject*') + PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView") -PyMemoryViewObjectStruct = lltype.ForwardReference() -PyMemoryViewObject = lltype.Ptr(PyMemoryViewObjectStruct) -PyMemoryViewObjectFields = PyObjectFields + \ - (("view", Py_buffer),) -cpython_struct( - "PyMemoryViewObject", PyMemoryViewObjectFields, PyMemoryViewObjectStruct, - level=2) - @bootstrap_function def init_memoryobject(space): "Type description of PyDictObject" @@ -32,7 +24,7 @@ attach=memory_attach, dealloc=memory_dealloc, realize=memory_realize, - ) + ) def memory_attach(space, py_obj, w_obj, w_userdata=None): """ From pypy.commits at gmail.com Mon Jul 3 13:44:26 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 
03 Jul 2017 10:44:26 -0700 (PDT) Subject: [pypy-commit] pypy default: Add result_is_ll option to cts.decl() Message-ID: <595a827a.c5b3df0a.9fde5.d717@mx.google.com> Author: Ronan Lamy Branch: Changeset: r91670:74beadeefdf0 Date: 2017-06-12 21:23 +0100 http://bitbucket.org/pypy/pypy/changeset/74beadeefdf0/ Log: Add result_is_ll option to cts.decl() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -456,13 +456,15 @@ return decorate def api_func_from_cdef(func, cdef, cts, - error=_NOT_SPECIFIED, header=DEFAULT_HEADER): + error=_NOT_SPECIFIED, header=DEFAULT_HEADER, + result_is_ll=False): func._always_inline_ = 'try' cdecl = cts.parse_func(cdef) RESULT = cdecl.get_llresult(cts) api_function = ApiFunction( cdecl.get_llargs(cts), RESULT, func, - error=_compute_error(error, RESULT), cdecl=cdecl) + error=_compute_error(error, RESULT), cdecl=cdecl, + result_is_ll=result_is_ll) FUNCTIONS_BY_HEADER[header][cdecl.name] = api_function unwrapper = api_function.get_unwrapper() unwrapper.func = func @@ -656,10 +658,12 @@ class CpyextTypeSpace(CTypeSpace): - def decl(self, cdef, error=_NOT_SPECIFIED, header=DEFAULT_HEADER): + def decl(self, cdef, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, + result_is_ll=False): def decorate(func): return api_func_from_cdef( - func, cdef, self, error=error, header=header) + func, cdef, self, error=error, header=header, + result_is_ll=result_is_ll) return decorate From pypy.commits at gmail.com Mon Jul 3 13:50:32 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 03 Jul 2017 10:50:32 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <595a83e8.950e1c0a.3aadc.37c9@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91671:d5cde34e0278 Date: 2017-07-03 18:49 +0100 http://bitbucket.org/pypy/pypy/changeset/d5cde34e0278/ Log: hg merge default diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- 
a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -459,13 +459,15 @@ return decorate def api_func_from_cdef(func, cdef, cts, - error=_NOT_SPECIFIED, header=DEFAULT_HEADER): + error=_NOT_SPECIFIED, header=DEFAULT_HEADER, + result_is_ll=False): func._always_inline_ = 'try' cdecl = cts.parse_func(cdef) RESULT = cdecl.get_llresult(cts) api_function = ApiFunction( cdecl.get_llargs(cts), RESULT, func, - error=_compute_error(error, RESULT), cdecl=cdecl) + error=_compute_error(error, RESULT), cdecl=cdecl, + result_is_ll=result_is_ll) FUNCTIONS_BY_HEADER[header][cdecl.name] = api_function unwrapper = api_function.get_unwrapper() unwrapper.func = func @@ -670,10 +672,12 @@ class CpyextTypeSpace(CTypeSpace): - def decl(self, cdef, error=_NOT_SPECIFIED, header=DEFAULT_HEADER): + def decl(self, cdef, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, + result_is_ll=False): def decorate(func): return api_func_from_cdef( - func, cdef, self, error=error, header=header) + func, cdef, self, error=error, header=header, + result_is_ll=result_is_ll) return decorate diff --git a/pypy/module/cpyext/include/memoryobject.h b/pypy/module/cpyext/include/memoryobject.h --- a/pypy/module/cpyext/include/memoryobject.h +++ b/pypy/module/cpyext/include/memoryobject.h @@ -5,14 +5,7 @@ extern "C" { #endif -/* The struct is declared here but it shouldn't - be considered public. Don't access those fields directly, - use the functions instead! */ -typedef struct { - PyObject_HEAD - Py_buffer view; -} PyMemoryViewObject; - +#include "cpyext_memoryobject.h" /* Get a pointer to the memoryview's private copy of the exporter's buffer. 
*/ #define PyMemoryView_GET_BUFFER(op) (&((PyMemoryViewObject *)(op))->view) diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,11 +1,8 @@ -from rpython.rlib.objectmodel import keepalive_until_here -from pypy.interpreter.error import oefmt from pypy.module.cpyext.api import ( - cpython_api, Py_buffer, CANNOT_FAIL, Py_MAX_FMT, Py_MAX_NDIMS, - build_type_checkers, Py_ssize_tP, PyObjectFields, cpython_struct, - bootstrap_function, Py_bufferP, slot_function, generic_cpy_call) + cpython_api, CANNOT_FAIL, Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, + Py_ssize_tP, cts, parse_dir, bootstrap_function, Py_bufferP, slot_function) from pypy.module.cpyext.pyobject import ( - PyObject, make_ref, as_pyobj, decref, from_ref, make_typedescr, + PyObject, make_ref, decref, from_ref, make_typedescr, get_typedescr, track_reference) from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import widen @@ -14,17 +11,12 @@ from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.import_ import PyImport_Import +cts.parse_header(parse_dir / 'cpyext_memoryobject.h') +PyMemoryViewObject = cts.gettype('PyMemoryViewObject*') + PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView") -PyMemoryViewObjectStruct = lltype.ForwardReference() -PyMemoryViewObject = lltype.Ptr(PyMemoryViewObjectStruct) -PyMemoryViewObjectFields = PyObjectFields + \ - (("view", Py_buffer),) -cpython_struct( - "PyMemoryViewObject", PyMemoryViewObjectFields, PyMemoryViewObjectStruct, - level=2) - @bootstrap_function def init_memoryobject(space): "Type description of PyDictObject" @@ -33,7 +25,7 @@ attach=memory_attach, dealloc=memory_dealloc, realize=memory_realize, - ) + ) def memory_attach(space, py_obj, w_obj, w_userdata=None): """ From pypy.commits at gmail.com Mon Jul 3 17:35:25 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 
03 Jul 2017 14:35:25 -0700 (PDT) Subject: [pypy-commit] pypy default: add missing file Message-ID: <595ab89d.c68b1c0a.b6cf3.6dea@mx.google.com> Author: Ronan Lamy Branch: Changeset: r91672:b4c172f3c5e8 Date: 2017-07-03 22:31 +0100 http://bitbucket.org/pypy/pypy/changeset/b4c172f3c5e8/ Log: add missing file diff --git a/pypy/module/cpyext/parse/cpyext_memoryobject.h b/pypy/module/cpyext/parse/cpyext_memoryobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/parse/cpyext_memoryobject.h @@ -0,0 +1,7 @@ +/* The struct is declared here but it shouldn't + be considered public. Don't access those fields directly, + use the functions instead! */ +typedef struct { + PyObject_HEAD + Py_buffer view; +} PyMemoryViewObject; From pypy.commits at gmail.com Mon Jul 3 17:35:27 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 03 Jul 2017 14:35:27 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <595ab89f.0ca6df0a.1172d.056c@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91673:e8077e1b4dd0 Date: 2017-07-03 22:32 +0100 http://bitbucket.org/pypy/pypy/changeset/e8077e1b4dd0/ Log: hg merge default diff --git a/pypy/module/cpyext/parse/cpyext_memoryobject.h b/pypy/module/cpyext/parse/cpyext_memoryobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/parse/cpyext_memoryobject.h @@ -0,0 +1,7 @@ +/* The struct is declared here but it shouldn't + be considered public. Don't access those fields directly, + use the functions instead! 
*/ +typedef struct { + PyObject_HEAD + Py_buffer view; +} PyMemoryViewObject; From pypy.commits at gmail.com Mon Jul 3 17:35:29 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 03 Jul 2017 14:35:29 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: implement PyMemoryView_FromMemory() Message-ID: <595ab8a1.028b1c0a.9410e.c40e@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91674:1f8f159f1fdf Date: 2017-07-03 22:34 +0100 http://bitbucket.org/pypy/pypy/changeset/1f8f159f1fdf/ Log: implement PyMemoryView_FromMemory() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -126,7 +126,7 @@ Py_TPFLAGS_HEAPTYPE Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_MAX_NDIMS Py_CLEANUP_SUPPORTED -PyBUF_FORMAT PyBUF_ND PyBUF_STRIDES PyBUF_WRITABLE PyBUF_SIMPLE +PyBUF_FORMAT PyBUF_ND PyBUF_STRIDES PyBUF_WRITABLE PyBUF_SIMPLE PyBUF_WRITE """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -7,6 +7,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import widen from pypy.interpreter.error import oefmt +from pypy.module.cpyext.api import PyBUF_WRITE from pypy.objspace.std.memoryobject import W_MemoryView from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.import_ import PyImport_Import @@ -185,6 +186,20 @@ py_memview = make_ref(space, w_memview, w_obj) return py_memview + at cts.decl("""PyObject * + PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)""") +def PyMemoryView_FromMemory(space, mem, size, flags): + """Expose a raw memory area as a view of contiguous bytes. flags can be + PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes). + The memoryview has complete buffer information. 
+ """ + from pypy.module.cpyext.slotdefs import CPyBuffer + readonly = int(widen(flags) == PyBUF_WRITE) + view = CPyBuffer(space, cts.cast('void*', mem), size, None, + readonly=readonly) + w_mview = W_MemoryView(view) + return w_mview + @cpython_api([Py_bufferP], PyObject, result_is_ll=True) def PyMemoryView_FromBuffer(space, view): """Create a memoryview object wrapping the given buffer structure view. diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -265,3 +265,12 @@ self.debug_collect() assert module.get_cnt() == 0 assert module.get_dealloc_cnt() == 1 + + def test_FromMemory(self): + module = self.import_extension('foo', [ + ('new', 'METH_NOARGS', """ + static char s[5] = "hello"; + return PyMemoryView_FromMemory(s, 4, PyBUF_READ); + """)]) + mv = module.new() + assert mv.tobytes() == b'hell' From pypy.commits at gmail.com Tue Jul 4 11:27:50 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:27:50 -0700 (PDT) Subject: [pypy-commit] stmgc c8-fix-commit-signalling: Signal commit to inevitable transaction if another transaction wants to commit Message-ID: <595bb3f6.c990df0a.3ce7.e41c@mx.google.com> Author: Tobias Weber Branch: c8-fix-commit-signalling Changeset: r2080:5d95b73a59c9 Date: 2017-06-30 18:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/5d95b73a59c9/ Log: Signal commit to inevitable transaction if another transaction wants to commit diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -366,6 +366,14 @@ static void readd_wb_executed_flags(void); static void check_all_write_barrier_flags(char *segbase, struct list_s *list); +static void signal_commit_to_inevitable_transaction(void) { + struct stm_priv_segment_info_s* inevitable_segement = get_inevitable_thread_segment(); + if (inevitable_segement != 0) { + // the inevitable thread is still 
running: set its "please commit" flag (is ignored by the inevitable thread if it is atomic) + inevitable_segement->commit_if_not_atomic = true; + } +} + static void wait_for_inevitable(void) { intptr_t detached = 0; @@ -382,6 +390,8 @@ try to detach an inevitable transaction regularly */ detached = fetch_detached_transaction(); if (detached == 0) { + // the inevitable trx was not detached or it was detached but is atomic + signal_commit_to_inevitable_transaction(); EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) goto wait_some_more; @@ -1627,6 +1637,7 @@ stm_spin_loop(); assert(_stm_detached_inevitable_from_thread == 0); + STM_PSEGMENT->commit_if_not_atomic = false; soon_finished_or_inevitable_thread_segment(); STM_PSEGMENT->transaction_state = TS_INEVITABLE; diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -168,6 +168,9 @@ /* For stm_enable_atomic() */ uintptr_t atomic_nesting_levels; + + // TODO signal flag that is checked in throw_away_nursery() for making immediate commit + bool commit_if_not_atomic; }; enum /* safe_point */ { diff --git a/c8/stm/detach.c b/c8/stm/detach.c --- a/c8/stm/detach.c +++ b/c8/stm/detach.c @@ -215,6 +215,7 @@ } } +// TODO write tests, verify is working, verify no overflows with adaptive mode uintptr_t stm_is_atomic(stm_thread_local_t *tl) { assert(STM_SEGMENT->running_thread == tl); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -500,6 +500,11 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; + if (pseg->commit_if_not_atomic && pseg->pub.running_thread->self_or_0_if_atomic != 0) { + // not atomic and commit signalled by waiting thread: commit immediately + pseg->pub.nursery_mark = 0; + } + /* free any object left from 'young_outside_nursery' */ if (!tree_is_cleared(pseg->young_outside_nursery)) { wlog_t *item; diff --git a/c8/stm/sync.c 
b/c8/stm/sync.c --- a/c8/stm/sync.c +++ b/c8/stm/sync.c @@ -293,6 +293,19 @@ return false; } +static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void) +{ + struct stm_priv_segment_info_s* segment; + int num; + for (num = 1; num < NB_SEGMENTS; num++) { + segment = get_priv_segment(num); + if (segment->transaction_state == TS_INEVITABLE) { + return segment; + } + } + return 0; +} + __attribute__((unused)) static bool _seems_to_be_running_transaction(void) { diff --git a/c8/stm/sync.h b/c8/stm/sync.h --- a/c8/stm/sync.h +++ b/c8/stm/sync.h @@ -29,6 +29,7 @@ static void release_thread_segment(stm_thread_local_t *tl); static void soon_finished_or_inevitable_thread_segment(void); static bool any_soon_finished_or_inevitable_thread_segment(void); +static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, From pypy.commits at gmail.com Tue Jul 4 11:27:52 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:27:52 -0700 (PDT) Subject: [pypy-commit] stmgc c8-fix-commit-signalling: Add signalling also to become inevitable Message-ID: <595bb3f8.5886df0a.ab298.2863@mx.google.com> Author: Tobias Weber Branch: c8-fix-commit-signalling Changeset: r2081:51b2dcee8206 Date: 2017-06-30 23:17 +0200 http://bitbucket.org/pypy/stmgc/changeset/51b2dcee8206/ Log: Add signalling also to become inevitable diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1594,6 +1594,8 @@ if (any_soon_finished_or_inevitable_thread_segment() && !safe_point_requested()) { + signal_commit_to_inevitable_transaction(); + /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, From pypy.commits at gmail.com Tue Jul 4 11:27:55 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:27:55 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: 
Improve backoff parameter calculation Message-ID: <595bb3fb.07bf1c0a.93b86.3c82@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2083:2f066c0fb56c Date: 2017-07-04 12:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/2f066c0fb56c/ Log: Improve backoff parameter calculation diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -35,7 +35,8 @@ new = STM_MIN_RELATIVE_TRANSACTION_LENGTH; } // the shorter the trx, the more backoff: 1000 at min trx length, proportional decrease to 1 at max trx length (think a/x + b = backoff) - tl->transaction_length_backoff = (int)(0.0000001 / new - 0.9999999); + tl->transaction_length_backoff = (int)(1 / (100000000 * new) + 5); + // printf("thread %d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); tl->linear_transaction_length_increment = new; } else if (tl->transaction_length_backoff == 0) { // backoff counter is zero, exponential increase up to 1 From pypy.commits at gmail.com Tue Jul 4 11:27:57 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:27:57 -0700 (PDT) Subject: [pypy-commit] stmgc c8-fix-commit-signalling: Evaluate signal to commit only if running inevitable, and refactor wait implementation of become inevitable method Message-ID: <595bb3fd.150e1c0a.1bdd6.9cee@mx.google.com> Author: Tobias Weber Branch: c8-fix-commit-signalling Changeset: r2084:f27950321d3c Date: 2017-07-04 12:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/f27950321d3c/ Log: Evaluate signal to commit only if running inevitable, and refactor wait implementation of become inevitable method diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1571,7 +1571,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = 0; + int num_waits = NB_SEGMENTS; //0; // TODO try disable timing_become_inevitable(); @@ -1588,8 +1588,6 @@ stm_abort_transaction(); /* is already inevitable, abort */ #endif - 
bool timed_out = false; - s_mutex_lock(); if (any_soon_finished_or_inevitable_thread_segment() && !safe_point_requested()) { @@ -1598,36 +1596,33 @@ /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); - if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, - 0.000054321)) - timed_out = true; + if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { + s_mutex_unlock(); + /* try to detach another inevitable transaction, but + only after waiting a bit. This is necessary to avoid + deadlocks in some situations, which are hopefully + not too common. We don't want two threads constantly + detaching each other. */ + intptr_t detached = fetch_detached_transaction(); + if (detached != 0) { + EMIT_WAIT_DONE(); + commit_fetched_detached_transaction(detached); + } + goto retry_from_start; + } + num_waits++; } s_mutex_unlock(); - - if (timed_out) { - /* try to detach another inevitable transaction, but - only after waiting a bit. This is necessary to avoid - deadlocks in some situations, which are hopefully - not too common. We don't want two threads constantly - detaching each other. */ - intptr_t detached = fetch_detached_transaction(); - if (detached != 0) { - EMIT_WAIT_DONE(); - commit_fetched_detached_transaction(detached); - } + goto retry_from_start; + } else { + EMIT_WAIT_DONE(); + if (!_validate_and_turn_inevitable()) { + goto retry_from_start; } - else { - num_waits++; - } - goto retry_from_start; } - EMIT_WAIT_DONE(); - if (!_validate_and_turn_inevitable()) - goto retry_from_start; } - else { - if (!_validate_and_turn_inevitable()) - return; + else if (!_validate_and_turn_inevitable()) { + return; } /* There may be a concurrent commit of a detached Tx going on. 
diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -500,8 +500,10 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; - if (pseg->commit_if_not_atomic && pseg->pub.running_thread->self_or_0_if_atomic != 0) { - // not atomic and commit signalled by waiting thread: commit immediately + if (pseg->commit_if_not_atomic + && pseg->transaction_state == TS_INEVITABLE + && pseg->pub.running_thread->self_or_0_if_atomic != 0) { + // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; } From pypy.commits at gmail.com Tue Jul 4 11:27:59 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:27:59 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Merge minor fixes to commit signalling Message-ID: <595bb3ff.12a9df0a.9f47e.7904@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2085:e0f9b83d9771 Date: 2017-07-04 12:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/e0f9b83d9771/ Log: Merge minor fixes to commit signalling diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1638,7 +1638,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = 0; + int num_waits = NB_SEGMENTS; //0; // TODO try disable timing_become_inevitable(); @@ -1655,8 +1655,6 @@ stm_abort_transaction(); /* is already inevitable, abort */ #endif - bool timed_out = false; - s_mutex_lock(); if (any_soon_finished_or_inevitable_thread_segment() && !safe_point_requested()) { @@ -1665,36 +1663,33 @@ /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); - if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, - 0.000054321)) - timed_out = true; + if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { + s_mutex_unlock(); + /* try to detach another inevitable transaction, 
but + only after waiting a bit. This is necessary to avoid + deadlocks in some situations, which are hopefully + not too common. We don't want two threads constantly + detaching each other. */ + intptr_t detached = fetch_detached_transaction(); + if (detached != 0) { + EMIT_WAIT_DONE(); + commit_fetched_detached_transaction(detached); + } + goto retry_from_start; + } + num_waits++; } s_mutex_unlock(); - - if (timed_out) { - /* try to detach another inevitable transaction, but - only after waiting a bit. This is necessary to avoid - deadlocks in some situations, which are hopefully - not too common. We don't want two threads constantly - detaching each other. */ - intptr_t detached = fetch_detached_transaction(); - if (detached != 0) { - EMIT_WAIT_DONE(); - commit_fetched_detached_transaction(detached); - } + goto retry_from_start; + } else { + EMIT_WAIT_DONE(); + if (!_validate_and_turn_inevitable()) { + goto retry_from_start; } - else { - num_waits++; - } - goto retry_from_start; } - EMIT_WAIT_DONE(); - if (!_validate_and_turn_inevitable()) - goto retry_from_start; } - else { - if (!_validate_and_turn_inevitable()) - return; + else if (!_validate_and_turn_inevitable()) { + return; } /* There may be a concurrent commit of a detached Tx going on. 
diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -549,8 +549,10 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; - if (pseg->commit_if_not_atomic && pseg->pub.running_thread->self_or_0_if_atomic != 0) { - // not atomic and commit signalled by waiting thread: commit immediately + if (pseg->commit_if_not_atomic + && pseg->transaction_state == TS_INEVITABLE + && pseg->pub.running_thread->self_or_0_if_atomic != 0) { + // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; } From pypy.commits at gmail.com Tue Jul 4 11:27:54 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:27:54 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Merge fix for commit signalling to inevitable transactions Message-ID: <595bb3fa.25addf0a.a4033.af6b@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2082:39f4ab9d3e31 Date: 2017-07-01 19:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/39f4ab9d3e31/ Log: Merge fix for commit signalling to inevitable transactions diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -385,6 +385,14 @@ static void readd_wb_executed_flags(void); static void check_all_write_barrier_flags(char *segbase, struct list_s *list); +static void signal_commit_to_inevitable_transaction(void) { + struct stm_priv_segment_info_s* inevitable_segement = get_inevitable_thread_segment(); + if (inevitable_segement != 0) { + // the inevitable thread is still running: set its "please commit" flag (is ignored by the inevitable thread if it is atomic) + inevitable_segement->commit_if_not_atomic = true; + } +} + static void wait_for_inevitable(void) { intptr_t detached = 0; @@ -401,6 +409,8 @@ try to detach an inevitable transaction regularly */ detached = fetch_detached_transaction(); if (detached == 0) 
{ + // the inevitable trx was not detached or it was detached but is atomic + signal_commit_to_inevitable_transaction(); EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) goto wait_some_more; @@ -1651,6 +1661,8 @@ if (any_soon_finished_or_inevitable_thread_segment() && !safe_point_requested()) { + signal_commit_to_inevitable_transaction(); + /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, @@ -1694,6 +1706,7 @@ stm_spin_loop(); assert(_stm_detached_inevitable_from_thread == 0); + STM_PSEGMENT->commit_if_not_atomic = false; soon_finished_or_inevitable_thread_segment(); STM_PSEGMENT->transaction_state = TS_INEVITABLE; diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -169,6 +169,9 @@ /* For stm_enable_atomic() */ uintptr_t atomic_nesting_levels; + + // TODO signal flag that is checked in throw_away_nursery() for making immediate commit + bool commit_if_not_atomic; }; enum /* safe_point */ { diff --git a/c8/stm/detach.c b/c8/stm/detach.c --- a/c8/stm/detach.c +++ b/c8/stm/detach.c @@ -215,6 +215,7 @@ } } +// TODO write tests, verify is working, verify no overflows with adaptive mode uintptr_t stm_is_atomic(stm_thread_local_t *tl) { assert(STM_SEGMENT->running_thread == tl); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -548,6 +548,11 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; + if (pseg->commit_if_not_atomic && pseg->pub.running_thread->self_or_0_if_atomic != 0) { + // not atomic and commit signalled by waiting thread: commit immediately + pseg->pub.nursery_mark = 0; + } + /* free any object left from 'young_outside_nursery' */ if (!tree_is_cleared(pseg->young_outside_nursery)) { wlog_t *item; diff --git a/c8/stm/sync.c b/c8/stm/sync.c --- a/c8/stm/sync.c +++ 
b/c8/stm/sync.c @@ -302,6 +302,19 @@ return false; } +static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void) +{ + struct stm_priv_segment_info_s* segment; + int num; + for (num = 1; num < NB_SEGMENTS; num++) { + segment = get_priv_segment(num); + if (segment->transaction_state == TS_INEVITABLE) { + return segment; + } + } + return 0; +} + __attribute__((unused)) static bool _seems_to_be_running_transaction(void) { diff --git a/c8/stm/sync.h b/c8/stm/sync.h --- a/c8/stm/sync.h +++ b/c8/stm/sync.h @@ -30,6 +30,7 @@ static void release_thread_segment(stm_thread_local_t *tl); static void soon_finished_or_inevitable_thread_segment(void); static bool any_soon_finished_or_inevitable_thread_segment(void); +static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, From pypy.commits at gmail.com Tue Jul 4 11:28:00 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:28:00 -0700 (PDT) Subject: [pypy-commit] stmgc c8-fix-commit-signalling: Fix check for inevitable breaks commit signalling Message-ID: <595bb400.052b1c0a.c7438.11f8@mx.google.com> Author: Tobias Weber Branch: c8-fix-commit-signalling Changeset: r2086:2a092c9c9376 Date: 2017-07-04 15:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/2a092c9c9376/ Log: Fix check for inevitable breaks commit signalling diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1571,7 +1571,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = NB_SEGMENTS; //0; // TODO try disable + int num_waits = NB_SEGMENTS; //0; timing_become_inevitable(); @@ -1614,7 +1614,8 @@ } s_mutex_unlock(); goto retry_from_start; - } else { + } + else { EMIT_WAIT_DONE(); if (!_validate_and_turn_inevitable()) { goto retry_from_start; diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -501,7 +501,7 @@ pseg->pub.nursery_mark -= nursery_used; if 
(pseg->commit_if_not_atomic - && pseg->transaction_state == TS_INEVITABLE + // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? && pseg->pub.running_thread->self_or_0_if_atomic != 0) { // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; From pypy.commits at gmail.com Tue Jul 4 11:28:02 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:28:02 -0700 (PDT) Subject: [pypy-commit] stmgc c8-fix-commit-signalling: Revert aggressive wait Message-ID: <595bb402.81581c0a.fe8e5.3e84@mx.google.com> Author: Tobias Weber Branch: c8-fix-commit-signalling Changeset: r2087:1ea4b3c11042 Date: 2017-07-04 17:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/1ea4b3c11042/ Log: Revert aggressive wait diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1571,7 +1571,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = NB_SEGMENTS; //0; + int num_waits = 0; timing_become_inevitable(); From pypy.commits at gmail.com Tue Jul 4 11:28:04 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:28:04 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Merge revert aggressive wait and fix check for inevitable breaks commit signalling Message-ID: <595bb404.4e921c0a.ab490.c824@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2088:43693de1f6ff Date: 2017-07-04 17:27 +0200 http://bitbucket.org/pypy/stmgc/changeset/43693de1f6ff/ Log: Merge revert aggressive wait and fix check for inevitable breaks commit signalling diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1638,7 +1638,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = NB_SEGMENTS; //0; // TODO try disable + int num_waits = 0; timing_become_inevitable(); @@ -1681,7 +1681,8 @@ } s_mutex_unlock(); goto retry_from_start; - } else { + 
} + else { EMIT_WAIT_DONE(); if (!_validate_and_turn_inevitable()) { goto retry_from_start; diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -550,7 +550,7 @@ pseg->pub.nursery_mark -= nursery_used; if (pseg->commit_if_not_atomic - && pseg->transaction_state == TS_INEVITABLE + // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? && pseg->pub.running_thread->self_or_0_if_atomic != 0) { // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; From pypy.commits at gmail.com Tue Jul 4 11:28:07 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:28:07 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Merge fix for inevitable transaction commit signalling Message-ID: <595bb407.8ea3df0a.4012d.43ee@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2089:136efd31d83f Date: 2017-06-30 21:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/136efd31d83f/ Log: Merge fix for inevitable transaction commit signalling diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -385,6 +385,14 @@ static void readd_wb_executed_flags(void); static void check_all_write_barrier_flags(char *segbase, struct list_s *list); +static void signal_commit_to_inevitable_transaction(void) { + struct stm_priv_segment_info_s* inevitable_segement = get_inevitable_thread_segment(); + if (inevitable_segement != 0) { + // the inevitable thread is still running: set its "please commit" flag (is ignored by the inevitable thread if it is atomic) + inevitable_segement->commit_if_not_atomic = true; + } +} + static void wait_for_inevitable(void) { intptr_t detached = 0; @@ -401,6 +409,8 @@ try to detach an inevitable transaction regularly */ detached = fetch_detached_transaction(); if (detached == 0) { + // the inevitable trx was not 
detached or it was detached but is atomic + signal_commit_to_inevitable_transaction(); EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) goto wait_some_more; @@ -1694,6 +1704,7 @@ stm_spin_loop(); assert(_stm_detached_inevitable_from_thread == 0); + STM_PSEGMENT->commit_if_not_atomic = false; soon_finished_or_inevitable_thread_segment(); STM_PSEGMENT->transaction_state = TS_INEVITABLE; diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -169,6 +169,9 @@ /* For stm_enable_atomic() */ uintptr_t atomic_nesting_levels; + + // TODO signal flag that is checked in throw_away_nursery() for making immediate commit + bool commit_if_not_atomic; }; enum /* safe_point */ { diff --git a/c8/stm/detach.c b/c8/stm/detach.c --- a/c8/stm/detach.c +++ b/c8/stm/detach.c @@ -215,6 +215,7 @@ } } +// TODO write tests, verify is working, verify no overflows with adaptive mode uintptr_t stm_is_atomic(stm_thread_local_t *tl) { assert(STM_SEGMENT->running_thread == tl); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -542,6 +542,11 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; + if (pseg->commit_if_not_atomic && pseg->pub.running_thread->self_or_0_if_atomic != 0) { + // not atomic and commit signalled by waiting thread: commit immediately + pseg->pub.nursery_mark = 0; + } + /* free any object left from 'young_outside_nursery' */ if (!tree_is_cleared(pseg->young_outside_nursery)) { wlog_t *item; diff --git a/c8/stm/sync.c b/c8/stm/sync.c --- a/c8/stm/sync.c +++ b/c8/stm/sync.c @@ -302,6 +302,19 @@ return false; } +static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void) +{ + struct stm_priv_segment_info_s* segment; + int num; + for (num = 1; num < NB_SEGMENTS; num++) { + segment = get_priv_segment(num); + if (segment->transaction_state == TS_INEVITABLE) { + return segment; + } + } + 
return 0; +} + __attribute__((unused)) static bool _seems_to_be_running_transaction(void) { diff --git a/c8/stm/sync.h b/c8/stm/sync.h --- a/c8/stm/sync.h +++ b/c8/stm/sync.h @@ -30,6 +30,7 @@ static void release_thread_segment(stm_thread_local_t *tl); static void soon_finished_or_inevitable_thread_segment(void); static bool any_soon_finished_or_inevitable_thread_segment(void); +static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, From pypy.commits at gmail.com Tue Jul 4 11:28:09 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:28:09 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Merge add signal to commit inevitable trx when becoming inevitable Message-ID: <595bb409.c7571c0a.ddfd8.9403@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2090:cf2b97b05bc6 Date: 2017-06-30 23:25 +0200 http://bitbucket.org/pypy/stmgc/changeset/cf2b97b05bc6/ Log: Merge add signal to commit inevitable trx when becoming inevitable diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1661,6 +1661,8 @@ if (any_soon_finished_or_inevitable_thread_segment() && !safe_point_requested()) { + signal_commit_to_inevitable_transaction(); + /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, From pypy.commits at gmail.com Tue Jul 4 11:28:11 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:28:11 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Merge aggressive wait when becoming inevitable Message-ID: <595bb40b.4e911c0a.4e52e.6e24@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2091:d36657eedf72 Date: 2017-07-04 16:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/d36657eedf72/ Log: Merge aggressive wait 
when becoming inevitable diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1638,7 +1638,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = 0; + int num_waits = NB_SEGMENTS; //0; timing_become_inevitable(); @@ -1655,8 +1655,6 @@ stm_abort_transaction(); /* is already inevitable, abort */ #endif - bool timed_out = false; - s_mutex_lock(); if (any_soon_finished_or_inevitable_thread_segment() && !safe_point_requested()) { @@ -1665,36 +1663,34 @@ /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); - if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, - 0.000054321)) - timed_out = true; + if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { + s_mutex_unlock(); + /* try to detach another inevitable transaction, but + only after waiting a bit. This is necessary to avoid + deadlocks in some situations, which are hopefully + not too common. We don't want two threads constantly + detaching each other. */ + intptr_t detached = fetch_detached_transaction(); + if (detached != 0) { + EMIT_WAIT_DONE(); + commit_fetched_detached_transaction(detached); + } + goto retry_from_start; + } + num_waits++; } s_mutex_unlock(); - - if (timed_out) { - /* try to detach another inevitable transaction, but - only after waiting a bit. This is necessary to avoid - deadlocks in some situations, which are hopefully - not too common. We don't want two threads constantly - detaching each other. 
*/ - intptr_t detached = fetch_detached_transaction(); - if (detached != 0) { - EMIT_WAIT_DONE(); - commit_fetched_detached_transaction(detached); - } - } - else { - num_waits++; - } goto retry_from_start; } - EMIT_WAIT_DONE(); - if (!_validate_and_turn_inevitable()) - goto retry_from_start; + else { + EMIT_WAIT_DONE(); + if (!_validate_and_turn_inevitable()) { + goto retry_from_start; + } + } } - else { - if (!_validate_and_turn_inevitable()) - return; + else if (!_validate_and_turn_inevitable()) { + return; } /* There may be a concurrent commit of a detached Tx going on. diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -542,8 +542,10 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; - if (pseg->commit_if_not_atomic && pseg->pub.running_thread->self_or_0_if_atomic != 0) { - // not atomic and commit signalled by waiting thread: commit immediately + if (pseg->commit_if_not_atomic + // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? 
+ && pseg->pub.running_thread->self_or_0_if_atomic != 0) { + // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; } From pypy.commits at gmail.com Tue Jul 4 11:28:12 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 04 Jul 2017 08:28:12 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Merge revert aggressive wait Message-ID: <595bb40c.415f1c0a.b2bb.5fd2@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2092:0c811825c149 Date: 2017-07-04 17:25 +0200 http://bitbucket.org/pypy/stmgc/changeset/0c811825c149/ Log: Merge revert aggressive wait diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1638,7 +1638,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = NB_SEGMENTS; //0; + int num_waits = 0; timing_become_inevitable(); From pypy.commits at gmail.com Tue Jul 4 13:42:48 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 04 Jul 2017 10:42:48 -0700 (PDT) Subject: [pypy-commit] pypy default: Move PyObject_AsCharBuffer to buffer.py for consistency with py3.5 Message-ID: <595bd398.84e31c0a.604e9.2221@mx.google.com> Author: Ronan Lamy Branch: Changeset: r91675:94366943700b Date: 2017-07-04 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/94366943700b/ Log: Move PyObject_AsCharBuffer to buffer.py for consistency with py3.5 diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,10 +1,33 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import oefmt from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, cts, Py_buffer, - Py_ssize_t, Py_ssize_tP, generic_cpy_call, + cpython_api, Py_buffer, Py_ssize_t, Py_ssize_tP, CONST_STRINGP, cts, + generic_cpy_call, PyBUF_WRITABLE, PyBUF_FORMAT, PyBUF_ND, PyBUF_STRIDES) -from 
pypy.module.cpyext.pyobject import PyObject, Py_IncRef +from pypy.module.cpyext.pyobject import PyObject, incref + + at cpython_api([PyObject, CONST_STRINGP, Py_ssize_tP], rffi.INT_real, error=-1) +def PyObject_AsCharBuffer(space, obj, bufferp, sizep): + """Returns a pointer to a read-only memory location usable as + character-based input. The obj argument must support the single-segment + character buffer interface. On success, returns 0, sets buffer to the + memory location and size to the buffer length. Returns -1 and sets a + TypeError on error. + """ + pto = obj.c_ob_type + pb = pto.c_tp_as_buffer + if not (pb and pb.c_bf_getreadbuffer and pb.c_bf_getsegcount): + raise oefmt(space.w_TypeError, "expected a character buffer object") + if generic_cpy_call(space, pb.c_bf_getsegcount, + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: + raise oefmt(space.w_TypeError, + "expected a single-segment buffer object") + size = generic_cpy_call(space, pb.c_bf_getcharbuffer, + obj, 0, bufferp) + if size < 0: + return -1 + sizep[0] = size + return 0 @cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t, lltype.Signed, lltype.Signed], rffi.INT, error=-1) @@ -20,7 +43,7 @@ view.c_len = length view.c_obj = obj if obj: - Py_IncRef(space, obj) + incref(space, obj) view.c_itemsize = 1 rffi.setintfield(view, 'c_readonly', readonly) rffi.setintfield(view, 'c_ndim', 1) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -3,7 +3,7 @@ cpython_api, generic_cpy_call, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, PyVarObject, size_t, slot_function, Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, - Py_GE, CONST_STRING, CONST_STRINGP, FILEP, fwrite) + Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, from_ref, Py_IncRef, Py_DecRef, get_typedescr) @@ -432,30 +432,6 @@ is active then NULL is returned but PyErr_Occurred() will return false.""" 
return space.call_function(space.builtin.get('dir'), w_o) - at cpython_api([PyObject, CONST_STRINGP, Py_ssize_tP], rffi.INT_real, error=-1) -def PyObject_AsCharBuffer(space, obj, bufferp, sizep): - """Returns a pointer to a read-only memory location usable as - character-based input. The obj argument must support the single-segment - character buffer interface. On success, returns 0, sets buffer to the - memory location and size to the buffer length. Returns -1 and sets a - TypeError on error. - """ - pto = obj.c_ob_type - - pb = pto.c_tp_as_buffer - if not (pb and pb.c_bf_getreadbuffer and pb.c_bf_getsegcount): - raise oefmt(space.w_TypeError, "expected a character buffer object") - if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: - raise oefmt(space.w_TypeError, - "expected a single-segment buffer object") - size = generic_cpy_call(space, pb.c_bf_getcharbuffer, - obj, 0, bufferp) - if size < 0: - return -1 - sizep[0] = size - return 0 - # Also in include/object.h Py_PRINT_RAW = 1 # No string quotes etc. 
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -11,7 +11,7 @@ _PyString_Join) from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref -from pypy.module.cpyext.object import PyObject_AsCharBuffer +from pypy.module.cpyext.buffer import PyObject_AsCharBuffer from pypy.module.cpyext.api import PyTypeObjectPtr From pypy.commits at gmail.com Tue Jul 4 13:42:50 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 04 Jul 2017 10:42:50 -0700 (PDT) Subject: [pypy-commit] pypy default: Move CPyBuffer and dependencies to buffer.py Message-ID: <595bd39a.6c8fdf0a.e736c.1d43@mx.google.com> Author: Ronan Lamy Branch: Changeset: r91676:b22fbf59ad0f Date: 2017-07-04 18:18 +0100 http://bitbucket.org/pypy/pypy/changeset/b22fbf59ad0f/ Log: Move CPyBuffer and dependencies to buffer.py diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,10 +1,148 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib import rgc # Force registration of gc.collect +from rpython.rlib.buffer import RawBuffer from pypy.interpreter.error import oefmt +from pypy.interpreter.buffer import BufferView from pypy.module.cpyext.api import ( cpython_api, Py_buffer, Py_ssize_t, Py_ssize_tP, CONST_STRINGP, cts, generic_cpy_call, PyBUF_WRITABLE, PyBUF_FORMAT, PyBUF_ND, PyBUF_STRIDES) -from pypy.module.cpyext.pyobject import PyObject, incref +from pypy.module.cpyext.typeobjectdefs import releasebufferproc +from pypy.module.cpyext.pyobject import PyObject, incref, decref, as_pyobj + +class CBuffer(RawBuffer): + _immutable_ = True + def __init__(self, view): + self.view = view + self.readonly = view.readonly + + def getlength(self): + return 
self.view.getlength() + + def getitem(self, index): + return self.view.ptr[index] + + def getslice(self, start, stop, step, size): + assert step == 1 + assert stop - start == size + ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), start) + return rffi.charpsize2str(ptr, size) + + def setitem(self, index, char): + self.view.ptr[index] = char + + def setslice(self, index, s): + assert s is not None + ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), index) + rffi.str2chararray(s, ptr, len(s)) + + def get_raw_address(self): + return cts.cast('char *', self.view.ptr) + +class CPyBuffer(BufferView): + # Similar to Py_buffer + _immutable_ = True + + def __init__(self, space, ptr, size, w_obj, format='B', shape=None, + strides=None, ndim=1, itemsize=1, readonly=True, + needs_decref=False, + releasebufferproc=rffi.cast(rffi.VOIDP, 0)): + self.space = space + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + self.pyobj = as_pyobj(space, w_obj) + self.format = format + self.ndim = ndim + self.itemsize = itemsize + + if not shape: + self.shape = [size] + else: + self.shape = shape + if not strides: + self.strides = [1] + else: + self.strides = strides + self.readonly = readonly + self.needs_decref = needs_decref + self.releasebufferproc = releasebufferproc + + def releasebuffer(self): + if self.pyobj: + if self.needs_decref: + if self.releasebufferproc: + func_target = rffi.cast(releasebufferproc, self.releasebufferproc) + with lltype.scoped_alloc(Py_buffer) as pybuf: + pybuf.c_buf = self.ptr + pybuf.c_len = self.size + pybuf.c_ndim = cts.cast('int', self.ndim) + pybuf.c_shape = cts.cast('Py_ssize_t*', pybuf.c__shape) + pybuf.c_strides = cts.cast('Py_ssize_t*', pybuf.c__strides) + for i in range(self.ndim): + pybuf.c_shape[i] = self.shape[i] + pybuf.c_strides[i] = self.strides[i] + if self.format: + pybuf.c_format = rffi.str2charp(self.format) + else: + pybuf.c_format = rffi.str2charp("B") + generic_cpy_call(self.space, func_target, self.pyobj, 
pybuf) + decref(self.space, self.pyobj) + self.pyobj = lltype.nullptr(PyObject.TO) + else: + #do not call twice + return + + def getlength(self): + return self.size + + def getbytes(self, start, size): + return ''.join([self.ptr[i] for i in range(start, start + size)]) + + def setbytes(self, start, string): + # absolutely no safety checks, what could go wrong? + for i in range(len(string)): + self.ptr[start + i] = string[i] + + def as_str(self): + return CBuffer(self).as_str() + + def as_readbuf(self): + return CBuffer(self) + + def as_writebuf(self): + assert not self.readonly + return CBuffer(self) + + def get_raw_address(self): + return rffi.cast(rffi.CCHARP, self.ptr) + + def getformat(self): + return self.format + + def getshape(self): + return self.shape + + def getstrides(self): + return self.strides + + def getitemsize(self): + return self.itemsize + + def getndim(self): + return self.ndim + +class FQ(rgc.FinalizerQueue): + Class = CPyBuffer + def finalizer_trigger(self): + while 1: + buf = self.next_dead() + if not buf: + break + buf.releasebuffer() + +fq = FQ() + @cpython_api([PyObject, CONST_STRINGP, Py_ssize_tP], rffi.INT_real, error=-1) def PyObject_AsCharBuffer(space, obj, bufferp, sizep): diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -9,6 +9,7 @@ from pypy.objspace.std.memoryobject import W_MemoryView from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.import_ import PyImport_Import +from pypy.module.cpyext.buffer import CPyBuffer, fq cts.parse_header(parse_dir / 'cpyext_memoryobject.h') PyMemoryViewObject = cts.gettype('PyMemoryViewObject*') @@ -53,7 +54,6 @@ """ Creates the memory object in the interpreter """ - from pypy.module.cpyext.slotdefs import CPyBuffer, fq py_mem = rffi.cast(PyMemoryViewObject, obj) view = py_mem.c_view ndim = widen(view.c_ndim) diff --git a/pypy/module/cpyext/slotdefs.py 
b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,7 +4,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.rarithmetic import widen -from rpython.rlib import rgc # Force registration of gc.collect from pypy.module.cpyext.api import ( slot_function, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, Py_buffer, Py_bufferP, PyTypeObjectPtr, cts) @@ -13,22 +12,20 @@ getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, - readbufferproc, getbufferproc, releasebufferproc, ssizessizeobjargproc) -from pypy.module.cpyext.pyobject import make_ref, decref, as_pyobj, from_ref + readbufferproc, getbufferproc, ssizessizeobjargproc) +from pypy.module.cpyext.pyobject import make_ref, decref, from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.memoryobject import fill_Py_buffer from pypy.module.cpyext.state import State from pypy.module.cpyext import userslot -from pypy.interpreter.buffer import BufferView +from pypy.module.cpyext.buffer import CBuffer, CPyBuffer, fq from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.argument import Arguments -from rpython.rlib.buffer import RawBuffer from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize, not_rpython from rpython.tool.sourcetools import func_renamer from rpython.flowspace.model import Constant from rpython.flowspace.specialcase import register_flow_sc -from rpython.rtyper.annlowlevel import llhelper from pypy.module.sys.version import CPYTHON_VERSION PY3 = CPYTHON_VERSION[0] == 3 @@ -324,141 +321,6 @@ space.fromcache(State).check_and_raise_exception(always=True) return space.newint(res) -class CPyBuffer(BufferView): - # Similar to Py_buffer - _immutable_ = True - - 
def __init__(self, space, ptr, size, w_obj, format='B', shape=None, - strides=None, ndim=1, itemsize=1, readonly=True, - needs_decref=False, - releasebufferproc=rffi.cast(rffi.VOIDP, 0)): - self.space = space - self.ptr = ptr - self.size = size - self.w_obj = w_obj # kept alive - self.pyobj = as_pyobj(space, w_obj) - self.format = format - self.ndim = ndim - self.itemsize = itemsize - - if not shape: - self.shape = [size] - else: - self.shape = shape - if not strides: - self.strides = [1] - else: - self.strides = strides - self.readonly = readonly - self.needs_decref = needs_decref - self.releasebufferproc = releasebufferproc - - def releasebuffer(self): - if self.pyobj: - if self.needs_decref: - if self.releasebufferproc: - func_target = rffi.cast(releasebufferproc, self.releasebufferproc) - with lltype.scoped_alloc(Py_buffer) as pybuf: - pybuf.c_buf = self.ptr - pybuf.c_len = self.size - pybuf.c_ndim = cts.cast('int', self.ndim) - pybuf.c_shape = cts.cast('Py_ssize_t*', pybuf.c__shape) - pybuf.c_strides = cts.cast('Py_ssize_t*', pybuf.c__strides) - for i in range(self.ndim): - pybuf.c_shape[i] = self.shape[i] - pybuf.c_strides[i] = self.strides[i] - if self.format: - pybuf.c_format = rffi.str2charp(self.format) - else: - pybuf.c_format = rffi.str2charp("B") - generic_cpy_call(self.space, func_target, self.pyobj, pybuf) - decref(self.space, self.pyobj) - self.pyobj = lltype.nullptr(PyObject.TO) - else: - #do not call twice - return - - def getlength(self): - return self.size - - def getbytes(self, start, size): - return ''.join([self.ptr[i] for i in range(start, start + size)]) - - def setbytes(self, start, string): - # absolutely no safety checks, what could go wrong? 
- for i in range(len(string)): - self.ptr[start + i] = string[i] - - def as_str(self): - return CBuffer(self).as_str() - - def as_readbuf(self): - return CBuffer(self) - - def as_writebuf(self): - assert not self.readonly - return CBuffer(self) - - def get_raw_address(self): - return rffi.cast(rffi.CCHARP, self.ptr) - - def getformat(self): - return self.format - - def getshape(self): - return self.shape - - def getstrides(self): - return self.strides - - def getitemsize(self): - return self.itemsize - - def getndim(self): - return self.ndim - -class FQ(rgc.FinalizerQueue): - Class = CPyBuffer - def finalizer_trigger(self): - while 1: - buf = self.next_dead() - if not buf: - break - buf.releasebuffer() - -fq = FQ() - - -class CBuffer(RawBuffer): - _immutable_ = True - def __init__(self, view): - self.view = view - self.readonly = view.readonly - - def getlength(self): - return self.view.getlength() - - def getitem(self, index): - return self.view.ptr[index] - - def getslice(self, start, stop, step, size): - assert step == 1 - assert stop - start == size - ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), start) - return rffi.charpsize2str(ptr, size) - - def setitem(self, index, char): - self.view.ptr[index] = char - - def setslice(self, index, s): - assert s is not None - ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), index) - rffi.str2chararray(s, ptr, len(s)) - - def get_raw_address(self): - return cts.cast('char *', self.view.ptr) - - def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) py_obj = make_ref(space, w_self) From pypy.commits at gmail.com Tue Jul 4 15:00:52 2017 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 04 Jul 2017 12:00:52 -0700 (PDT) Subject: [pypy-commit] pypy default: copy over revision f29df8d, add new compilation unit Message-ID: <595be5e4.950e1c0a.3aadc.d8a4@mx.google.com> Author: Richard Plangger Branch: Changeset: r91677:82289adf3ee0 Date: 2017-07-04 14:54 -0400 
http://bitbucket.org/pypy/pypy/changeset/82289adf3ee0/ Log: copy over revision f29df8d, add new compilation unit diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -58,6 +58,7 @@ SHARED.join('compat.c'), SHARED.join('machine.c'), SHARED.join('vmp_stack.c'), + SHARED.join('vmprof_main.c'), # symbol table already in separate_module_files ] + separate_module_files, post_include_bits=[], diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main.c b/rpython/rlib/rvmprof/src/shared/vmprof_main.c new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_main.c @@ -0,0 +1,26 @@ +/* value: LSB bit is 1 if signals must be ignored; all other bits + are a counter for how many threads are currently in a signal handler */ +static long volatile signal_handler_value = 1; + +void vmprof_ignore_signals(int ignored) +{ + if (!ignored) { + __sync_fetch_and_and(&signal_handler_value, ~1L); + } else { + /* set the last bit, and wait until concurrently-running signal + handlers finish */ + while (__sync_or_and_fetch(&signal_handler_value, 1L) != 1L) { + usleep(1); + } + } +} + +long vmprof_enter_signal(void) +{ + return __sync_fetch_and_add(&signal_handler_value, 2L); +} + +long vmprof_exit_signal(void) +{ + return __sync_sub_and_fetch(&signal_handler_value, 2L); +} diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main.h b/rpython/rlib/rvmprof/src/shared/vmprof_main.h --- a/rpython/rlib/rvmprof/src/shared/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/shared/vmprof_main.h @@ -60,25 +60,9 @@ /************************************************************/ -/* value: last bit is 1 if signals must be ignored; all other bits - are a counter for how many threads are currently in a signal handler */ -static long volatile signal_handler_value = 1; - -RPY_EXTERN -void vmprof_ignore_signals(int ignored) -{ - if (!ignored) { - __sync_fetch_and_and(&signal_handler_value, ~1L); - } 
- else { - /* set the last bit, and wait until concurrently-running signal - handlers finish */ - while (__sync_or_and_fetch(&signal_handler_value, 1L) != 1L) { - usleep(1); - } - } -} - +RPY_EXTERN void vmprof_ignore_signals(int ignored); +RPY_EXTERN long vmprof_enter_signal(void); +RPY_EXTERN long vmprof_exit_signal(void); /* ************************************************************* * functions to write a profile file compatible with gperftools @@ -276,7 +260,7 @@ __sync_lock_release(&spinlock); #endif - long val = __sync_fetch_and_add(&signal_handler_value, 2L); + long val = vmprof_enter_signal(); if ((val & 1) == 0) { int saved_errno = errno; @@ -307,7 +291,7 @@ errno = saved_errno; } - __sync_sub_and_fetch(&signal_handler_value, 2L); + vmprof_exit_signal(); } From pypy.commits at gmail.com Tue Jul 4 15:06:59 2017 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 04 Jul 2017 12:06:59 -0700 (PDT) Subject: [pypy-commit] pypy default: apply windows fix Message-ID: <595be753.84e31c0a.604e9.3157@mx.google.com> Author: Richard Plangger Branch: Changeset: r91678:5eaa2ef5baa8 Date: 2017-07-04 15:05 -0400 http://bitbucket.org/pypy/pypy/changeset/5eaa2ef5baa8/ Log: apply windows fix diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main.c b/rpython/rlib/rvmprof/src/shared/vmprof_main.c --- a/rpython/rlib/rvmprof/src/shared/vmprof_main.c +++ b/rpython/rlib/rvmprof/src/shared/vmprof_main.c @@ -1,3 +1,6 @@ +#ifdef VMPROF_UNIX + +#include /* value: LSB bit is 1 if signals must be ignored; all other bits are a counter for how many threads are currently in a signal handler */ static long volatile signal_handler_value = 1; @@ -24,3 +27,4 @@ { return __sync_sub_and_fetch(&signal_handler_value, 2L); } +#endif From pypy.commits at gmail.com Tue Jul 4 15:55:43 2017 From: pypy.commits at gmail.com (mattip) Date: Tue, 04 Jul 2017 12:55:43 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-add_newdoc: allow assignment to NULL tp_doc even after PyType_Ready Message-ID: 
<595bf2bf.c990df0a.3ce7.2c86@mx.google.com> Author: Matti Picus Branch: cpyext-add_newdoc Changeset: r91680:895424ab836c Date: 2017-07-03 07:56 -0400 http://bitbucket.org/pypy/pypy/changeset/895424ab836c/ Log: allow assignment to NULL tp_doc even after PyType_Ready diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -366,6 +366,15 @@ if pto.c_tp_new: add_tp_new_wrapper(space, dict_w, pto) +def maybe_set_doc(space, w_type): + pto = rffi.cast(PyTypeObjectPtr, make_ref(space, w_type)) + if pto and pto.c_tp_doc and space.is_none(w_type.w_doc): + w_type.w_doc = space.newtext( + rffi.charp2str(cts.cast('char*', pto.c_tp_doc))) + # compatibility with CPython - assignment to tp_doc + # does not automatically assign to __dic__['__doc__'] + # w_type.dict_w.setdefault('__doc__', w_type.w_doc) + @slot_function([PyObject, PyObject, PyObject], PyObject) def tp_new_wrapper(space, self, w_args, w_kwds): self_pytype = rffi.cast(PyTypeObjectPtr, self) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -882,6 +882,9 @@ type(name, bases, dict) -> a new type""") w_type = _check(space, w_type) if not w_type.is_heaptype(): + if space.is_none(w_type.w_doc) and w_type.is_cpytype(): + from pypy.module.cpyext.typeobject import maybe_set_doc + maybe_set_doc(space, w_type) return w_type.w_doc w_result = w_type.getdictvalue(space, '__doc__') if w_result is None: From pypy.commits at gmail.com Tue Jul 4 15:55:45 2017 From: pypy.commits at gmail.com (mattip) Date: Tue, 04 Jul 2017 12:55:45 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-add_newdoc: test, add more doc assignment from cpyext Message-ID: <595bf2c1.07bf1c0a.93b86.91ee@mx.google.com> Author: Matti Picus Branch: cpyext-add_newdoc Changeset: r91681:4e8cc878567f Date: 2017-07-04 15:47 -0400 
http://bitbucket.org/pypy/pypy/changeset/4e8cc878567f/ Log: test, add more doc assignment from cpyext diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -250,12 +250,13 @@ return res class GetSetProperty(W_Root): - _immutable_fields_ = ["fget", "fset", "fdel"] + _immutable_fields_ = ["fget", "fset", "fdel", "is_cpyext"] w_objclass = None @specialize.arg(7) def __init__(self, fget, fset=None, fdel=None, doc=None, - cls=None, use_closure=False, tag=None, name=None): + cls=None, use_closure=False, tag=None, name=None, + is_cpyext=False): objclass_getter, cls = make_objclass_getter(tag, fget, cls) fget = make_descr_typecheck_wrapper((tag, 0), fget, cls=cls, use_closure=use_closure) @@ -264,10 +265,10 @@ fdel = make_descr_typecheck_wrapper((tag, 2), fdel, cls=cls, use_closure=use_closure) self._init(fget, fset, fdel, doc, cls, objclass_getter, use_closure, - name) + name, is_cpyext) def _init(self, fget, fset, fdel, doc, cls, objclass_getter, use_closure, - name): + name, is_cpyext): self.fget = fget self.fset = fset self.fdel = fdel @@ -276,12 +277,13 @@ self.objclass_getter = objclass_getter self.use_closure = use_closure self.name = name if name is not None else '' + self.is_cpyext = is_cpyext def copy_for_type(self, w_objclass): if self.objclass_getter is None: new = instantiate(GetSetProperty) new._init(self.fget, self.fset, self.fdel, self.doc, self.reqcls, - None, self.use_closure, self.name) + None, self.use_closure, self.name, self.is_cpyext) new.w_objclass = w_objclass return new else: @@ -348,6 +350,11 @@ space._see_getsetproperty(self) # only for fake/objspace.py return self + def descr__doc(space, w_self): + if w_self.doc is None and w_self.is_cpyext: + from pypy.module.cpyext.typeobject import maybe_set_getset_doc + maybe_set_getset_doc(space, w_self) + return space.newtext_or_none(w_self.doc) def interp_attrproperty(name, cls, doc=None, wrapfn=None): "NOT_RPYTHON: 
initialization-time only" @@ -374,7 +381,8 @@ __delete__ = interp2app(GetSetProperty.descr_property_del), __name__ = interp_attrproperty('name', cls=GetSetProperty, wrapfn="newtext_or_none"), __objclass__ = GetSetProperty(GetSetProperty.descr_get_objclass), - __doc__ = interp_attrproperty('doc', cls=GetSetProperty, wrapfn="newtext_or_none"), + __doc__ = GetSetProperty(GetSetProperty.descr__doc, + cls=GetSetProperty, name="__doc__"), ) assert not GetSetProperty.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -245,6 +245,7 @@ wrapfn="newtext_or_none"), __objclass__ = interp_attrproperty_w('w_objclass', cls=W_PyCMethodObject), __repr__ = interp2app(W_PyCMethodObject.descr_method_repr), + __doc__ = GetSetProperty(W_PyCFunctionObject.get_doc), ) W_PyCMethodObject.typedef.acceptable_as_base_class = False @@ -257,6 +258,7 @@ __objclass__ = interp_attrproperty_w('w_objclass', cls=W_PyCClassMethodObject), __repr__ = interp2app(W_PyCClassMethodObject.descr_method_repr), + __doc__ = GetSetProperty(W_PyCFunctionObject.get_doc), ) W_PyCClassMethodObject.typedef.acceptable_as_base_class = False diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1356,25 +1356,72 @@ return (PyObject *)&FooType_Type; ''' ), - ("add_doc_string", "METH_O", + ("add_doc_string_type", "METH_O", ''' PyTypeObject* obj = (PyTypeObject *)args; obj->tp_doc = "A docstring"; Py_INCREF(Py_None); return Py_None; ''' + ), + ("add_doc_string_getset", "METH_O", + ''' + PyGetSetDescrObject * obj = (PyGetSetDescrObject*)args; + obj->d_getset->doc = "A docstring"; + Py_INCREF(Py_None); + return Py_None; + ''' + ), + ("add_doc_string_method", "METH_O", + ''' + PyMethodDescrObject * obj = 
(PyMethodDescrObject*)args; + obj->d_method->ml_doc = "A docstring"; + Py_INCREF(Py_None); + return Py_None; + ''' )], prologue=''' static PyTypeObject FooType_Type = { PyVarObject_HEAD_INIT(NULL, 0) "foo.Type", }; + + static PyObject * + foo_42(void *self) + { + return PyInt_FromLong(42L); + }; + + static PyObject * + foo_43(void *self) + { + return PyInt_FromLong(43L); + }; + + static PyGetSetDef foo_getsetlist[] = { + {"foo_42", + (getter)foo_42, + NULL, + NULL, NULL}, + {NULL, NULL, NULL, NULL, NULL}, + }; + + static PyMethodDef foo_methods[] = { + {"foo_43", (PyCFunction)foo_43, METH_NOARGS, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ + }; ''', more_init=''' FooType_Type.tp_flags = Py_TPFLAGS_DEFAULT; FooType_Type.tp_base = &PyType_Type; + FooType_Type.tp_getset = foo_getsetlist; + FooType_Type.tp_methods = foo_methods; if (PyType_Ready(&FooType_Type) < 0) INITERROR; ''') a = module.getType() - module.add_doc_string(a) + module.add_doc_string_type(a) assert a.__doc__ == "A docstring" assert a.__dict__['__doc__'] == None # compatibility + module.add_doc_string_getset(a.foo_42) + assert a.foo_42.__doc__ == "A docstring" + module.add_doc_string_method(a.foo_43) + assert a.foo_43.__doc__ == "A docstring" diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -59,12 +59,12 @@ set = GettersAndSetters.setter.im_func GetSetProperty.__init__(self, get, set, None, doc, cls=None, use_closure=True, - tag="cpyext_1") + tag="cpyext_1", is_cpyext=True) def PyDescr_NewGetSet(space, getset, w_type): return W_GetSetPropertyEx(getset, w_type) -def make_GetSet(space, getsetprop): +def make_PyGetSetDef(space, getsetprop): py_getsetdef = lltype.malloc(PyGetSetDef, flavor='raw') doc = getsetprop.doc if doc: @@ -95,7 +95,13 @@ set = GettersAndSetters.member_setter.im_func GetSetProperty.__init__(self, get, set, del_, doc, cls=None, use_closure=True, - tag="cpyext_2") + 
tag="cpyext_2", is_cpyext=True) + + def descr__doc(space, w_self): + if w_self.doc is None and w_self.member.c_doc: + w_self.doc = rffi.charp2str(w_self.member.c_doc) + return space.newtext_or_none(w_self.doc) + # change the typedef name W_MemberDescr.typedef = TypeDef( @@ -106,8 +112,8 @@ __name__ = interp_attrproperty('name', cls=GetSetProperty, wrapfn="newtext_or_none"), __objclass__ = GetSetProperty(GetSetProperty.descr_get_objclass), - __doc__ = interp_attrproperty('doc', cls=GetSetProperty, - wrapfn="newtext_or_none"), + __doc__ = GetSetProperty(W_MemberDescr.descr__doc, + cls=W_MemberDescr, name="__doc__"), ) assert not W_MemberDescr.typedef.acceptable_as_base_class # no __new__ @@ -183,16 +189,17 @@ w_obj = space.allocate_instance(W_MemberDescr, w_type) w_obj.__init__(member, w_type) track_reference(space, obj, w_obj) + print 'memberdescr_realise called' return w_obj def getsetdescr_attach(space, py_obj, w_obj, w_userdata=None): """ - Fills a newly allocated PyGetSetDescrObject with the given W_GetSetPropertyEx - object. The values must not be modified. + Fills a newly allocated PyGetSetDescrObject with the given + W_GetSetPropertyEx object. The values must not be modified. """ py_getsetdescr = rffi.cast(PyGetSetDescrObject, py_obj) - if isinstance(w_obj, GetSetProperty): - py_getsetdef = make_GetSet(space, w_obj) + if type(w_obj) is GetSetProperty: + py_getsetdef = make_PyGetSetDef(space, w_obj) assert space.isinstance_w(w_userdata, space.w_type) w_obj = W_GetSetPropertyEx(py_getsetdef, w_userdata) # XXX assign to d_dname, d_type? 
@@ -375,6 +382,12 @@ # does not automatically assign to __dic__['__doc__'] # w_type.dict_w.setdefault('__doc__', w_type.w_doc) +def maybe_set_getset_doc(space, w_getset): + assert isinstance(w_getset, W_GetSetPropertyEx) + if not w_getset.doc and w_getset.getset.c_doc: + w_getset.doc = rffi.charp2str( + cts.cast('char*', w_getset.getset.c_doc)) + @slot_function([PyObject, PyObject, PyObject], PyObject) def tp_new_wrapper(space, self, w_args, w_kwds): self_pytype = rffi.cast(PyTypeObjectPtr, self) From pypy.commits at gmail.com Tue Jul 4 15:55:41 2017 From: pypy.commits at gmail.com (mattip) Date: Tue, 04 Jul 2017 12:55:41 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-add_newdoc: test for modifying tp_doc even after PyType_Ready Message-ID: <595bf2bd.89b81c0a.c0df8.3dc5@mx.google.com> Author: Matti Picus Branch: cpyext-add_newdoc Changeset: r91679:feb0a7a8e0bb Date: 2017-07-03 07:58 -0400 http://bitbucket.org/pypy/pypy/changeset/feb0a7a8e0bb/ Log: test for modifying tp_doc even after PyType_Ready diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1347,3 +1347,34 @@ Bsize = module.get_basicsize(B) assert Asize == Bsize assert Asize > basesize + + def test_late_doc_setup(self): + module = self.import_extension('foo', [ + ("getType", "METH_NOARGS", + ''' + Py_INCREF(&FooType_Type); + return (PyObject *)&FooType_Type; + ''' + ), + ("add_doc_string", "METH_O", + ''' + PyTypeObject* obj = (PyTypeObject *)args; + obj->tp_doc = "A docstring"; + Py_INCREF(Py_None); + return Py_None; + ''' + )], prologue=''' + static PyTypeObject FooType_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.Type", + }; + ''', more_init=''' + FooType_Type.tp_flags = Py_TPFLAGS_DEFAULT; + FooType_Type.tp_base = &PyType_Type; + if (PyType_Ready(&FooType_Type) < 0) INITERROR; + ''') + a = module.getType() + module.add_doc_string(a) + assert a.__doc__ 
== "A docstring" + assert a.__dict__['__doc__'] == None # compatibility + From pypy.commits at gmail.com Tue Jul 4 16:02:46 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 04 Jul 2017 13:02:46 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Move CPyBuffer and dependencies to buffer.py Message-ID: <595bf466.c7571c0a.ddfd8.e080@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91682:a5510f559b24 Date: 2017-07-04 20:24 +0100 http://bitbucket.org/pypy/pypy/changeset/a5510f559b24/ Log: Move CPyBuffer and dependencies to buffer.py diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,10 +1,158 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib import rgc # Force registration of gc.collect +from rpython.rlib.buffer import RawBuffer from pypy.interpreter.error import oefmt +from pypy.interpreter.buffer import BufferView from pypy.module.cpyext.api import ( cpython_api, Py_buffer, Py_ssize_t, Py_ssize_tP, CONST_STRINGP, cts, generic_cpy_call, PyBUF_WRITABLE, PyBUF_FORMAT, PyBUF_ND, PyBUF_STRIDES, PyBUF_SIMPLE) -from pypy.module.cpyext.pyobject import PyObject, incref, Py_DecRef +from pypy.module.cpyext.typeobjectdefs import releasebufferproc +from pypy.module.cpyext.pyobject import PyObject, incref, decref, as_pyobj + +class CBuffer(RawBuffer): + _immutable_ = True + def __init__(self, view): + self.view = view + self.readonly = view.readonly + + def getlength(self): + return self.view.getlength() + + def getitem(self, index): + return self.view.ptr[index] + + def getslice(self, start, stop, step, size): + assert step == 1 + assert stop - start == size + ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), start) + return rffi.charpsize2str(ptr, size) + + def setitem(self, index, char): + self.view.ptr[index] = char + + def setslice(self, index, s): + assert s is not None + ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), index) + 
rffi.str2chararray(s, ptr, len(s)) + + def get_raw_address(self): + return cts.cast('char *', self.view.ptr) + +class CPyBuffer(BufferView): + # Similar to Py_buffer + _immutable_ = True + + def __init__(self, space, ptr, size, w_obj, format='B', shape=None, + strides=None, ndim=1, itemsize=1, readonly=True, + needs_decref=False, + releasebufferproc=rffi.cast(rffi.VOIDP, 0)): + self.space = space + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + self.pyobj = as_pyobj(space, w_obj) + self.format = format + self.ndim = ndim + self.itemsize = itemsize + + # cf. Objects/memoryobject.c:init_shape_strides() + if ndim == 0: + self.shape = [] + self.strides = [] + elif ndim == 1: + if shape is None: + self.shape = [size // itemsize] + else: + self.shape = shape + if strides is None: + self.strides = [itemsize] + else: + self.strides = strides + else: + assert len(shape) == ndim + self.shape = shape + # XXX: missing init_strides_from_shape + self.strides = strides + self.readonly = readonly + self.needs_decref = needs_decref + self.releasebufferproc = releasebufferproc + + def releasebuffer(self): + if self.pyobj: + if self.needs_decref: + if self.releasebufferproc: + func_target = rffi.cast(releasebufferproc, self.releasebufferproc) + with lltype.scoped_alloc(Py_buffer) as pybuf: + pybuf.c_buf = self.ptr + pybuf.c_len = self.size + pybuf.c_ndim = cts.cast('int', self.ndim) + pybuf.c_shape = cts.cast('Py_ssize_t*', pybuf.c__shape) + pybuf.c_strides = cts.cast('Py_ssize_t*', pybuf.c__strides) + for i in range(self.ndim): + pybuf.c_shape[i] = self.shape[i] + pybuf.c_strides[i] = self.strides[i] + if self.format: + pybuf.c_format = rffi.str2charp(self.format) + else: + pybuf.c_format = rffi.str2charp("B") + generic_cpy_call(self.space, func_target, self.pyobj, pybuf) + decref(self.space, self.pyobj) + self.pyobj = lltype.nullptr(PyObject.TO) + else: + #do not call twice + return + + def getlength(self): + return self.size + + def getbytes(self, start, 
size): + return ''.join([self.ptr[i] for i in range(start, start + size)]) + + def setbytes(self, start, string): + # absolutely no safety checks, what could go wrong? + for i in range(len(string)): + self.ptr[start + i] = string[i] + + def as_str(self): + return CBuffer(self).as_str() + + def as_readbuf(self): + return CBuffer(self) + + def as_writebuf(self): + assert not self.readonly + return CBuffer(self) + + def get_raw_address(self): + return rffi.cast(rffi.CCHARP, self.ptr) + + def getformat(self): + return self.format + + def getshape(self): + return self.shape + + def getstrides(self): + return self.strides + + def getitemsize(self): + return self.itemsize + + def getndim(self): + return self.ndim + +class FQ(rgc.FinalizerQueue): + Class = CPyBuffer + def finalizer_trigger(self): + while 1: + buf = self.next_dead() + if not buf: + break + buf.releasebuffer() + +fq = FQ() + @cpython_api([PyObject, CONST_STRINGP, Py_ssize_tP], rffi.INT_real, error=-1) def PyObject_AsCharBuffer(space, obj, bufferp, sizep): @@ -32,7 +180,7 @@ if pb.c_bf_releasebuffer: generic_cpy_call(space, pb.c_bf_releasebuffer, obj, view) - Py_DecRef(space, view.c_obj) + decref(space, view.c_obj) return 0 def fill_buffer(space, view, pybuf, py_obj): @@ -71,7 +219,6 @@ share a contiguous chunk of memory of "unsigned bytes" of the given length. Returns 0 on success and -1 (with raising an error) on error. 
""" - flags = rffi.cast(lltype.Signed, flags) if flags & PyBUF_WRITABLE and readonly: raise oefmt(space.w_ValueError, "Object is not writable") view.c_buf = buf diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -6,11 +6,11 @@ get_typedescr, track_reference) from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import widen -from pypy.interpreter.error import oefmt from pypy.module.cpyext.api import PyBUF_WRITE from pypy.objspace.std.memoryobject import W_MemoryView from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.import_ import PyImport_Import +from pypy.module.cpyext.buffer import CPyBuffer, fq cts.parse_header(parse_dir / 'cpyext_memoryobject.h') PyMemoryViewObject = cts.gettype('PyMemoryViewObject*') @@ -55,7 +55,6 @@ """ Creates the memory object in the interpreter """ - from pypy.module.cpyext.slotdefs import CPyBuffer, fq py_mem = rffi.cast(PyMemoryViewObject, obj) view = py_mem.c_view ndim = widen(view.c_ndim) @@ -169,7 +168,6 @@ (fort is 'A'). Return 0 otherwise.""" # traverse the strides, checking for consistent stride increases from # right-to-left (c) or left-to-right (fortran). Copied from cpython - if view.c_suboffsets: return 0 if (fort == 'C'): @@ -193,7 +191,6 @@ PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes). The memoryview has complete buffer information. 
""" - from pypy.module.cpyext.slotdefs import CPyBuffer readonly = int(widen(flags) == PyBUF_WRITE) view = CPyBuffer(space, cts.cast('void*', mem), size, None, readonly=readonly) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,7 +4,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.rarithmetic import widen -from rpython.rlib import rgc # Force registration of gc.collect from pypy.module.cpyext.api import ( slot_function, generic_cpy_call, PyObject, Py_ssize_t, Py_buffer, Py_bufferP, PyTypeObjectPtr, cts) @@ -13,22 +12,20 @@ getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, - getbufferproc, releasebufferproc, ssizessizeobjargproc) -from pypy.module.cpyext.pyobject import make_ref, decref, as_pyobj, from_ref + getbufferproc, ssizessizeobjargproc) +from pypy.module.cpyext.pyobject import make_ref, decref, from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.memoryobject import fill_Py_buffer from pypy.module.cpyext.state import State from pypy.module.cpyext import userslot -from pypy.interpreter.buffer import BufferView +from pypy.module.cpyext.buffer import CBuffer, CPyBuffer, fq from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.argument import Arguments -from rpython.rlib.buffer import RawBuffer from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize, not_rpython from rpython.tool.sourcetools import func_renamer from rpython.flowspace.model import Constant from rpython.flowspace.specialcase import register_flow_sc -from rpython.rtyper.annlowlevel import llhelper from pypy.module.sys.version import CPYTHON_VERSION PY3 = CPYTHON_VERSION[0] == 3 @@ -315,151 +312,6 
@@ space.fromcache(State).check_and_raise_exception(always=True) return space.newint(res) -class CPyBuffer(BufferView): - # Similar to Py_buffer - _immutable_ = True - - def __init__(self, space, ptr, size, w_obj, format='B', shape=None, - strides=None, ndim=1, itemsize=1, readonly=True, - needs_decref=False, - releasebufferproc=rffi.cast(rffi.VOIDP, 0)): - self.space = space - self.ptr = ptr - self.size = size - self.w_obj = w_obj # kept alive - self.pyobj = as_pyobj(space, w_obj) - self.format = format - self.ndim = ndim - self.itemsize = itemsize - - # cf. Objects/memoryobject.c:init_shape_strides() - if ndim == 0: - self.shape = [] - self.strides = [] - elif ndim == 1: - if shape is None: - self.shape = [size // itemsize] - else: - self.shape = shape - if strides is None: - self.strides = [itemsize] - else: - self.strides = strides - else: - assert len(shape) == ndim - self.shape = shape - # XXX: missing init_strides_from_shape - self.strides = strides - self.readonly = readonly - self.needs_decref = needs_decref - self.releasebufferproc = releasebufferproc - - def releasebuffer(self): - if self.pyobj: - if self.needs_decref: - if self.releasebufferproc: - func_target = rffi.cast(releasebufferproc, self.releasebufferproc) - with lltype.scoped_alloc(Py_buffer) as pybuf: - pybuf.c_buf = self.ptr - pybuf.c_len = self.size - pybuf.c_ndim = cts.cast('int', self.ndim) - pybuf.c_shape = cts.cast('Py_ssize_t*', pybuf.c__shape) - pybuf.c_strides = cts.cast('Py_ssize_t*', pybuf.c__strides) - for i in range(self.ndim): - pybuf.c_shape[i] = self.shape[i] - pybuf.c_strides[i] = self.strides[i] - if self.format: - pybuf.c_format = rffi.str2charp(self.format) - else: - pybuf.c_format = rffi.str2charp("B") - generic_cpy_call(self.space, func_target, self.pyobj, pybuf) - decref(self.space, self.pyobj) - self.pyobj = lltype.nullptr(PyObject.TO) - else: - #do not call twice - return - - def getlength(self): - return self.size - - def getbytes(self, start, size): - return 
''.join([self.ptr[i] for i in range(start, start + size)]) - - def setbytes(self, start, string): - # absolutely no safety checks, what could go wrong? - for i in range(len(string)): - self.ptr[start + i] = string[i] - - def as_str(self): - return CBuffer(self).as_str() - - def as_readbuf(self): - return CBuffer(self) - - def as_writebuf(self): - assert not self.readonly - return CBuffer(self) - - def get_raw_address(self): - return rffi.cast(rffi.CCHARP, self.ptr) - - def getformat(self): - return self.format - - def getshape(self): - return self.shape - - def getstrides(self): - return self.strides - - def getitemsize(self): - return self.itemsize - - def getndim(self): - return self.ndim - -class FQ(rgc.FinalizerQueue): - Class = CPyBuffer - def finalizer_trigger(self): - while 1: - buf = self.next_dead() - if not buf: - break - buf.releasebuffer() - -fq = FQ() - - -class CBuffer(RawBuffer): - _immutable_ = True - def __init__(self, view): - self.view = view - self.readonly = view.readonly - - def getlength(self): - return self.view.getlength() - - def getitem(self, index): - return self.view.ptr[index] - - def getslice(self, start, stop, step, size): - assert step == 1 - assert stop - start == size - ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), start) - return rffi.charpsize2str(ptr, size) - - def setitem(self, index, char): - self.view.ptr[index] = char - - def setslice(self, index, s): - assert s is not None - ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), index) - rffi.str2chararray(s, ptr, len(s)) - - def get_raw_address(self): - return cts.cast('char *', self.view.ptr) - - def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) py_obj = make_ref(space, w_self) From pypy.commits at gmail.com Tue Jul 4 16:02:48 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 04 Jul 2017 13:02:48 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: fix and update tests Message-ID: 
<595bf468.41921c0a.79a43.5440@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91683:73b9066a185c Date: 2017-07-04 21:02 +0100 http://bitbucket.org/pypy/pypy/changeset/73b9066a185c/ Log: fix and update tests diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -1,13 +1,15 @@ # encoding: utf-8 -import pytest from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import OperationError from pypy.module.cpyext.test.test_api import BaseApiTest, raises_w from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase -from pypy.module.cpyext.bytesobject import new_empty_str, PyBytesObject -from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call, Py_buffer -from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref, as_pyobj -from pypy.module.cpyext.api import PyTypeObjectPtr +from pypy.module.cpyext.bytesobject import ( + new_empty_str, PyBytesObject, _PyBytes_Resize, PyBytes_Concat, + PyBytes_ConcatAndDel, + _PyBytes_Eq, + _PyBytes_Join) +from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP +from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref +from pypy.module.cpyext.buffer import PyObject_AsCharBuffer class AppTestBytesObject(AppTestCpythonExtensionBase): @@ -225,21 +227,21 @@ assert module.check_suboffsets(b'1234') == 1 class TestBytes(BaseApiTest): - def test_bytes_resize(self, space, api): + def test_bytes_resize(self, space): py_str = new_empty_str(space, 10) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') py_str.c_ob_sval[0] = 'a' py_str.c_ob_sval[1] = 'b' py_str.c_ob_sval[2] = 'c' ar[0] = rffi.cast(PyObject, py_str) - api._PyBytes_Resize(ar, 3) + _PyBytes_Resize(space, ar, 3) py_str = rffi.cast(PyBytesObject, ar[0]) assert py_str.c_ob_size == 3 assert py_str.c_ob_sval[1] == 'b' assert 
py_str.c_ob_sval[3] == '\x00' # the same for growing ar[0] = rffi.cast(PyObject, py_str) - api._PyBytes_Resize(ar, 10) + _PyBytes_Resize(space, ar, 10) py_str = rffi.cast(PyBytesObject, ar[0]) assert py_str.c_ob_size == 10 assert py_str.c_ob_sval[1] == 'b' @@ -247,61 +249,61 @@ Py_DecRef(space, ar[0]) lltype.free(ar, flavor='raw') - def test_Concat(self, space, api): + def test_Concat(self, space): ref = make_ref(space, space.newbytes('abc')) ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw') ptr[0] = ref prev_refcnt = ref.c_ob_refcnt - api.PyBytes_Concat(ptr, space.newbytes('def')) + PyBytes_Concat(space, ptr, space.newbytes('def')) assert ref.c_ob_refcnt == prev_refcnt - 1 assert space.bytes_w(from_ref(space, ptr[0])) == 'abcdef' - api.PyBytes_Concat(ptr, space.w_None) + with raises_w(space, TypeError): + PyBytes_Concat(space, ptr, space.w_None) assert not ptr[0] - api.PyErr_Clear() ptr[0] = lltype.nullptr(PyObject.TO) - api.PyBytes_Concat(ptr, space.newbytes('def')) # should not crash + PyBytes_Concat(space, ptr, space.newbytes('def')) # should not crash lltype.free(ptr, flavor='raw') - def test_ConcatAndDel(self, space, api): + def test_ConcatAndDel(self, space): ref1 = make_ref(space, space.newbytes('abc')) ref2 = make_ref(space, space.newbytes('def')) ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw') ptr[0] = ref1 prev_refcnf = ref2.c_ob_refcnt - api.PyBytes_ConcatAndDel(ptr, ref2) + PyBytes_ConcatAndDel(space, ptr, ref2) assert space.bytes_w(from_ref(space, ptr[0])) == 'abcdef' assert ref2.c_ob_refcnt == prev_refcnf - 1 Py_DecRef(space, ptr[0]) ptr[0] = lltype.nullptr(PyObject.TO) ref2 = make_ref(space, space.newbytes('foo')) prev_refcnf = ref2.c_ob_refcnt - api.PyBytes_ConcatAndDel(ptr, ref2) # should not crash + PyBytes_ConcatAndDel(space, ptr, ref2) # should not crash assert ref2.c_ob_refcnt == prev_refcnf - 1 lltype.free(ptr, flavor='raw') - def test_asbuffer(self, space, api): + def test_asbuffer(self, space): bufp = lltype.malloc(rffi.CCHARPP.TO, 1, 
flavor='raw') lenp = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') w_text = space.newbytes("text") ref = make_ref(space, w_text) prev_refcnt = ref.c_ob_refcnt - assert api.PyObject_AsCharBuffer(ref, bufp, lenp) == 0 + assert PyObject_AsCharBuffer(space, ref, bufp, lenp) == 0 assert ref.c_ob_refcnt == prev_refcnt assert lenp[0] == 4 assert rffi.charp2str(bufp[0]) == 'text' lltype.free(bufp, flavor='raw') lltype.free(lenp, flavor='raw') - api.Py_DecRef(ref) + Py_DecRef(space, ref) - def test_eq(self, space, api): - assert 1 == api._PyBytes_Eq(space.newbytes("hello"), space.newbytes("hello")) - assert 0 == api._PyBytes_Eq(space.newbytes("hello"), space.newbytes("world")) + def test_eq(self, space): + assert 1 == _PyBytes_Eq(space, space.newbytes("hello"), space.newbytes("hello")) + assert 0 == _PyBytes_Eq(space, space.newbytes("hello"), space.newbytes("world")) - def test_join(self, space, api): + def test_join(self, space): w_sep = space.newbytes('') w_seq = space.newtuple([space.newbytes('a'), space.newbytes('b')]) - w_joined = api._PyBytes_Join(w_sep, w_seq) + w_joined = _PyBytes_Join(space, w_sep, w_seq) assert space.bytes_w(w_joined) == 'ab' def test_FromObject(self, space, api): @@ -310,6 +312,5 @@ w_obj = space.call_function(space.w_bytearray, w_obj) assert space.eq_w(w_obj, api.PyBytes_FromObject(w_obj)) w_obj = space.wrap(u"test") - assert api.PyBytes_FromObject(w_obj) is None - api.PyErr_Clear() - + with raises_w(space, TypeError): + api.PyBytes_FromObject(w_obj) From pypy.commits at gmail.com Tue Jul 4 16:28:37 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 04 Jul 2017 13:28:37 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: fix test_exception.py and stop using api object Message-ID: <595bfa75.0a9e1c0a.1184c.01a3@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91684:1235016e2d2c Date: 2017-07-04 21:27 +0100 http://bitbucket.org/pypy/pypy/changeset/1235016e2d2c/ Log: fix test_exception.py and stop using api object diff --git 
a/pypy/module/cpyext/test/test_exception.py b/pypy/module/cpyext/test/test_exception.py --- a/pypy/module/cpyext/test/test_exception.py +++ b/pypy/module/cpyext/test/test_exception.py @@ -1,32 +1,37 @@ -from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_api import BaseApiTest, raises_w from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.pyobject import make_ref +from pypy.module.cpyext.exception import ( + PyExceptionInstance_Class, PyException_GetTraceback, + PyException_SetTraceback, PyException_GetContext, PyException_SetContext, + PyException_GetCause, PyException_SetCause) class TestExceptions(BaseApiTest): - def test_ExceptionInstance_Class(self, space, api): + def test_ExceptionInstance_Class(self, space): w_instance = space.call_function(space.w_ValueError) - assert api.PyExceptionInstance_Class(w_instance) is space.w_ValueError + assert PyExceptionInstance_Class( + space, w_instance) is space.w_ValueError - def test_traceback(self, space, api): + def test_traceback(self, space): w_exc = space.call_function(space.w_ValueError) - assert api.PyException_GetTraceback(w_exc) is None - assert api.PyException_SetTraceback(w_exc, space.wrap(1)) == -1 - api.PyErr_Clear() + assert PyException_GetTraceback(space, w_exc) is None + with raises_w(space, TypeError): + PyException_SetTraceback(space, w_exc, space.wrap(1)) - def test_context(self, space, api): + def test_context(self, space): w_exc = space.call_function(space.w_ValueError) - assert api.PyException_GetContext(w_exc) is None + assert PyException_GetContext(space, w_exc) is None w_ctx = space.call_function(space.w_IndexError) - api.PyException_SetContext(w_exc, make_ref(space, w_ctx)) - assert space.is_w(api.PyException_GetContext(w_exc), w_ctx) + PyException_SetContext(space, w_exc, make_ref(space, w_ctx)) + assert space.is_w(PyException_GetContext(space, w_exc), w_ctx) - def test_cause(self, space, api): + def 
test_cause(self, space): w_exc = space.call_function(space.w_ValueError) - assert api.PyException_GetCause(w_exc) is None + assert PyException_GetCause(space, w_exc) is None w_cause = space.call_function(space.w_IndexError) - api.PyException_SetCause(w_exc, make_ref(space, w_cause)) - assert space.is_w(api.PyException_GetCause(w_exc), w_cause) + PyException_SetCause(space, w_exc, make_ref(space, w_cause)) + assert space.is_w(PyException_GetCause(space, w_exc), w_cause) class AppTestExceptions(AppTestCpythonExtensionBase): @@ -38,6 +43,5 @@ return PyTuple_Pack(2, PyExc_EnvironmentError, PyExc_IOError); - """), - ]) + """)]) assert module.get_aliases() == (OSError, OSError) From pypy.commits at gmail.com Tue Jul 4 16:44:20 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 04 Jul 2017 13:44:20 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: fix test Message-ID: <595bfe24.c4da1c0a.97145.4fd6@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91685:c85122eab568 Date: 2017-07-04 21:43 +0100 http://bitbucket.org/pypy/pypy/changeset/c85122eab568/ Log: fix test diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -383,7 +383,7 @@ return Py_False; } return args->ob_type->tp_descr_get(args, NULL, - (PyObject *)&PyInt_Type); + (PyObject *)&PyLong_Type); ''' ) ]) From pypy.commits at gmail.com Tue Jul 4 17:01:02 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 04 Jul 2017 14:01:02 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <595c020e.962f1c0a.50d6e.cca5@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91686:33527bb8058e Date: 2017-07-04 22:00 +0100 http://bitbucket.org/pypy/pypy/changeset/33527bb8058e/ Log: hg merge default diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py 
@@ -58,6 +58,7 @@ SHARED.join('compat.c'), SHARED.join('machine.c'), SHARED.join('vmp_stack.c'), + SHARED.join('vmprof_main.c'), # symbol table already in separate_module_files ] + separate_module_files, post_include_bits=[], diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main.c b/rpython/rlib/rvmprof/src/shared/vmprof_main.c new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_main.c @@ -0,0 +1,30 @@ +#ifdef VMPROF_UNIX + +#include +/* value: LSB bit is 1 if signals must be ignored; all other bits + are a counter for how many threads are currently in a signal handler */ +static long volatile signal_handler_value = 1; + +void vmprof_ignore_signals(int ignored) +{ + if (!ignored) { + __sync_fetch_and_and(&signal_handler_value, ~1L); + } else { + /* set the last bit, and wait until concurrently-running signal + handlers finish */ + while (__sync_or_and_fetch(&signal_handler_value, 1L) != 1L) { + usleep(1); + } + } +} + +long vmprof_enter_signal(void) +{ + return __sync_fetch_and_add(&signal_handler_value, 2L); +} + +long vmprof_exit_signal(void) +{ + return __sync_sub_and_fetch(&signal_handler_value, 2L); +} +#endif diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main.h b/rpython/rlib/rvmprof/src/shared/vmprof_main.h --- a/rpython/rlib/rvmprof/src/shared/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/shared/vmprof_main.h @@ -60,25 +60,9 @@ /************************************************************/ -/* value: last bit is 1 if signals must be ignored; all other bits - are a counter for how many threads are currently in a signal handler */ -static long volatile signal_handler_value = 1; - -RPY_EXTERN -void vmprof_ignore_signals(int ignored) -{ - if (!ignored) { - __sync_fetch_and_and(&signal_handler_value, ~1L); - } - else { - /* set the last bit, and wait until concurrently-running signal - handlers finish */ - while (__sync_or_and_fetch(&signal_handler_value, 1L) != 1L) { - usleep(1); - } - } -} - +RPY_EXTERN void 
vmprof_ignore_signals(int ignored); +RPY_EXTERN long vmprof_enter_signal(void); +RPY_EXTERN long vmprof_exit_signal(void); /* ************************************************************* * functions to write a profile file compatible with gperftools @@ -276,7 +260,7 @@ __sync_lock_release(&spinlock); #endif - long val = __sync_fetch_and_add(&signal_handler_value, 2L); + long val = vmprof_enter_signal(); if ((val & 1) == 0) { int saved_errno = errno; @@ -307,7 +291,7 @@ errno = saved_errno; } - __sync_sub_and_fetch(&signal_handler_value, 2L); + vmprof_exit_signal(); } From pypy.commits at gmail.com Tue Jul 4 17:45:22 2017 From: pypy.commits at gmail.com (arigo) Date: Tue, 04 Jul 2017 14:45:22 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <595c0c72.848ddf0a.f8f29.c350@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r895:12ddca8a30af Date: 2017-07-04 23:45 +0200 http://bitbucket.org/pypy/pypy.org/changeset/12ddca8a30af/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $67077 of $105000 (63.9%) + $67112 of $105000 (63.9%)
@@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $59070 of $80000 (73.8%) + $59080 of $80000 (73.9%)
    @@ -29,7 +29,7 @@ - $67112 of $105000 (63.9%) + $67126 of $105000 (63.9%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Thu Jul 6 13:18:52 2017 From: pypy.commits at gmail.com (rlamy) Date: Thu, 06 Jul 2017 10:18:52 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Ensure that mappingproxy is recognised as a mapping, not a sequence Message-ID: <595e70fc.4c3e1c0a.63a88.883f@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91709:78dee6627756 Date: 2017-07-06 18:18 +0100 http://bitbucket.org/pypy/pypy/changeset/78dee6627756/ Log: Ensure that mappingproxy is recognised as a mapping, not a sequence diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -93,3 +93,7 @@ copy=interp2app(W_DictProxyObject.copy_w), **cmp_methods ) + +def _set_flag_map_or_seq(space): + w_type = space.gettypeobject(W_DictProxyObject.typedef) + w_type.flag_map_or_seq = 'M' diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -96,6 +96,8 @@ self.w_text = self.w_str del self.w_str self.w_dict.flag_map_or_seq = 'M' + from pypy.objspace.std import dictproxyobject + dictproxyobject._set_flag_map_or_seq(self) self.w_list.flag_map_or_seq = 'S' self.w_tuple.flag_map_or_seq = 'S' self.builtin_types['str'] = self.w_unicode diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -65,6 +65,7 @@ raises(TypeError, "proxy['a'] = 4") raises(TypeError, "del proxy['a']") raises(AttributeError, "proxy.clear()") + raises(TypeError, reversed, proxy) # class D(dict): def copy(self): return 3 From pypy.commits at gmail.com Fri Jul 7 02:37:57 2017 From: pypy.commits at gmail.com (mattip) Date: Thu, 06 Jul 2017 23:37:57 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-hash_notimpl: fix for test, skip if not translated Message-ID: 
<595f2c45.5a421c0a.be911.a352@mx.google.com> Author: Matti Picus Branch: cpyext-hash_notimpl Changeset: r91710:217e03c35f1f Date: 2017-07-07 09:36 +0300 http://bitbucket.org/pypy/pypy/changeset/217e03c35f1f/ Log: fix for test, skip if not translated diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1351,6 +1351,9 @@ class AppTestHashable(AppTestCpythonExtensionBase): def test_unhashable(self): + if not self.runappdirect: + skip('pointer to function equality available' + ' only after translation') module = self.import_extension('foo', [ ("new_obj", "METH_NOARGS", ''' diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -347,12 +347,9 @@ if len(slot_names) == 1: func = getattr(pto, slot_names[0]) if slot_names[0] == 'c_tp_hash': - # XXX if it is hash_not_impl, do not assign to dict_w - # name = rffi.charp2str(pto.c_tp_name) - # if 'foo' in name: - # import pdb;pdb.set_trace() if hash_not_impl == func: - # XXX never reached + # special case for tp_hash == PyObject_HashNotImplemented + dict_w[method_name] = space.w_None continue else: assert len(slot_names) == 2 From pypy.commits at gmail.com Fri Jul 7 02:56:18 2017 From: pypy.commits at gmail.com (mattip) Date: Thu, 06 Jul 2017 23:56:18 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: fix translation Message-ID: <595f3092.fa87df0a.75b14.f118@mx.google.com> Author: Matti Picus Branch: py3.5 Changeset: r91711:79da8a0088eb Date: 2017-07-07 09:55 +0300 http://bitbucket.org/pypy/pypy/changeset/79da8a0088eb/ Log: fix translation diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,6 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rgc # 
Force registration of gc.collect from rpython.rlib.buffer import RawBuffer +from rpython.rlib.rarithmetic import widen from pypy.interpreter.error import oefmt from pypy.interpreter.buffer import BufferView from pypy.module.cpyext.api import ( @@ -219,6 +220,7 @@ share a contiguous chunk of memory of "unsigned bytes" of the given length. Returns 0 on success and -1 (with raising an error) on error. """ + flags = widen(flags) if flags & PyBUF_WRITABLE and readonly: raise oefmt(space.w_ValueError, "Object is not writable") view.c_buf = buf From pypy.commits at gmail.com Fri Jul 7 03:16:05 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:05 -0700 (PDT) Subject: [pypy-commit] pypy jumbojet: close merged branch Message-ID: <595f3535.c5afdf0a.350ae.c6f6@mx.google.com> Author: Matti Picus Branch: jumbojet Changeset: r91714:3bc27f05c67f Date: 2017-07-07 09:49 +0300 http://bitbucket.org/pypy/pypy/changeset/3bc27f05c67f/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:01 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:01 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-obj-stealing: close merged branch Message-ID: <595f3531.ba85df0a.440e1.63d9@mx.google.com> Author: Matti Picus Branch: cpyext-obj-stealing Changeset: r91712:a25cad08ac53 Date: 2017-07-07 09:46 +0300 http://bitbucket.org/pypy/pypy/changeset/a25cad08ac53/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:03 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:03 -0700 (PDT) Subject: [pypy-commit] pypy ctypes_char_indexing: close merged branch Message-ID: <595f3533.028b1c0a.cab62.6b2c@mx.google.com> Author: Matti Picus Branch: ctypes_char_indexing Changeset: r91713:812bc6ea7285 Date: 2017-07-07 09:48 +0300 http://bitbucket.org/pypy/pypy/changeset/812bc6ea7285/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:11 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 
2017 00:16:11 -0700 (PDT) Subject: [pypy-commit] pypy sockopt_zero: close merged branch Message-ID: <595f353b.e2a0df0a.80d0c.2558@mx.google.com> Author: Matti Picus Branch: sockopt_zero Changeset: r91717:11a147bb5f8c Date: 2017-07-07 09:50 +0300 http://bitbucket.org/pypy/pypy/changeset/11a147bb5f8c/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:07 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:07 -0700 (PDT) Subject: [pypy-commit] pypy PyPy_profopt_enabled: close merged branch Message-ID: <595f3537.14b7df0a.11624.9295@mx.google.com> Author: Matti Picus Branch: PyPy_profopt_enabled Changeset: r91715:ae256800825b Date: 2017-07-07 09:49 +0300 http://bitbucket.org/pypy/pypy/changeset/ae256800825b/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:13 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:13 -0700 (PDT) Subject: [pypy-commit] pypy pypy_ctypes_nosegfault_nofastpath: close merged branch Message-ID: <595f353d.91101c0a.fa60a.4578@mx.google.com> Author: Matti Picus Branch: pypy_ctypes_nosegfault_nofastpath Changeset: r91718:43487a2e3f36 Date: 2017-07-07 09:51 +0300 http://bitbucket.org/pypy/pypy/changeset/43487a2e3f36/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:09 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:09 -0700 (PDT) Subject: [pypy-commit] pypy Kounavi/fix-typo-depricate-to-deprecate-p-1495624547235: close merged branch Message-ID: <595f3539.c20d1c0a.f445b.0490@mx.google.com> Author: Matti Picus Branch: Kounavi/fix-typo-depricate-to-deprecate-p-1495624547235 Changeset: r91716:03cc665a7161 Date: 2017-07-07 09:50 +0300 http://bitbucket.org/pypy/pypy/changeset/03cc665a7161/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:15 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:15 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-recursionlimit: close merged 
branch Message-ID: <595f353f.84e31c0a.806cb.e153@mx.google.com> Author: Matti Picus Branch: cpyext-recursionlimit Changeset: r91719:ec0618c52b09 Date: 2017-07-07 09:51 +0300 http://bitbucket.org/pypy/pypy/changeset/ec0618c52b09/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:17 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:17 -0700 (PDT) Subject: [pypy-commit] pypy sthalik/fix-signed-integer-sizes-1494493539409: close merged branch Message-ID: <595f3541.6facdf0a.31d67.67ad@mx.google.com> Author: Matti Picus Branch: sthalik/fix-signed-integer-sizes-1494493539409 Changeset: r91720:7660bfc93db9 Date: 2017-07-07 09:52 +0300 http://bitbucket.org/pypy/pypy/changeset/7660bfc93db9/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:19 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:19 -0700 (PDT) Subject: [pypy-commit] pypy vmprof-0.4.4: close merged branch Message-ID: <595f3543.cc331c0a.26b4e.4c1c@mx.google.com> Author: Matti Picus Branch: vmprof-0.4.4 Changeset: r91721:4113f4687b53 Date: 2017-07-07 09:53 +0300 http://bitbucket.org/pypy/pypy/changeset/4113f4687b53/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:21 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:21 -0700 (PDT) Subject: [pypy-commit] pypy vmprof-native: close merged branch Message-ID: <595f3545.42081c0a.f5da6.4b12@mx.google.com> Author: Matti Picus Branch: vmprof-native Changeset: r91722:8605f3ef66d1 Date: 2017-07-07 09:53 +0300 http://bitbucket.org/pypy/pypy/changeset/8605f3ef66d1/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:22 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:22 -0700 (PDT) Subject: [pypy-commit] pypy jumbo: close merged branch Message-ID: <595f3546.6e9adf0a.b054.4f3c@mx.google.com> Author: Matti Picus Branch: jumbo Changeset: r91723:7b37a51ac77c Date: 2017-07-07 09:54 +0300 
http://bitbucket.org/pypy/pypy/changeset/7b37a51ac77c/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:24 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:24 -0700 (PDT) Subject: [pypy-commit] pypy fniephaus/fix-typo-1488123166752: close merged branch Message-ID: <595f3548.71a0df0a.7440c.d9bc@mx.google.com> Author: Matti Picus Branch: fniephaus/fix-typo-1488123166752 Changeset: r91724:4c4bd0be2674 Date: 2017-07-07 09:54 +0300 http://bitbucket.org/pypy/pypy/changeset/4c4bd0be2674/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:26 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:26 -0700 (PDT) Subject: [pypy-commit] pypy fix-global: close merged branch Message-ID: <595f354a.010d1c0a.711d8.0f2d@mx.google.com> Author: Matti Picus Branch: fix-global Changeset: r91725:e8e1dca47708 Date: 2017-07-07 09:57 +0300 http://bitbucket.org/pypy/pypy/changeset/e8e1dca47708/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:28 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:28 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-fix-decimal-module-name: close merged branch Message-ID: <595f354c.a693df0a.ca537.b6ef@mx.google.com> Author: Matti Picus Branch: py3.5-fix-decimal-module-name Changeset: r91726:913a3db1147a Date: 2017-07-07 09:57 +0300 http://bitbucket.org/pypy/pypy/changeset/913a3db1147a/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:30 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:30 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-fix-globals: close merged branch Message-ID: <595f354e.cc8edf0a.d0254.eb07@mx.google.com> Author: Matti Picus Branch: py3.5-fix-globals Changeset: r91727:bb7cbf013b8c Date: 2017-07-07 09:58 +0300 http://bitbucket.org/pypy/pypy/changeset/bb7cbf013b8c/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:32 2017 From: pypy.commits at 
gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:32 -0700 (PDT) Subject: [pypy-commit] pypy optinfo-into-bridges-3: close merged branch Message-ID: <595f3550.e2a0df0a.80d0c.2581@mx.google.com> Author: Matti Picus Branch: optinfo-into-bridges-3 Changeset: r91728:25c88f3134cc Date: 2017-07-07 09:58 +0300 http://bitbucket.org/pypy/pypy/changeset/25c88f3134cc/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:33 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:33 -0700 (PDT) Subject: [pypy-commit] pypy space-wrap: close merged branch Message-ID: <595f3551.84e31c0a.806cb.e171@mx.google.com> Author: Matti Picus Branch: space-wrap Changeset: r91729:7a4a9d28b154 Date: 2017-07-07 09:59 +0300 http://bitbucket.org/pypy/pypy/changeset/7a4a9d28b154/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:35 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:35 -0700 (PDT) Subject: [pypy-commit] pypy fix_bool_restype: close merged branch Message-ID: <595f3553.5a421c0a.be911.ad57@mx.google.com> Author: Matti Picus Branch: fix_bool_restype Changeset: r91730:b2ea7e2e75b7 Date: 2017-07-07 09:59 +0300 http://bitbucket.org/pypy/pypy/changeset/b2ea7e2e75b7/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:37 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:37 -0700 (PDT) Subject: [pypy-commit] pypy vendor/stdlib-3.5: close merged branch Message-ID: <595f3555.2283df0a.3e328.9b27@mx.google.com> Author: Matti Picus Branch: vendor/stdlib-3.5 Changeset: r91731:8fde52c4931f Date: 2017-07-07 10:00 +0300 http://bitbucket.org/pypy/pypy/changeset/8fde52c4931f/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:39 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:39 -0700 (PDT) Subject: [pypy-commit] pypy TreeStain/main-lines-changed-in-l77-l83-made-para-1484471558033: close merged branch Message-ID: 
<595f3557.c8da1c0a.37125.ea60@mx.google.com> Author: Matti Picus Branch: TreeStain/main-lines-changed-in-l77-l83-made-para-1484471558033 Changeset: r91732:a267a451a7a4 Date: 2017-07-07 10:01 +0300 http://bitbucket.org/pypy/pypy/changeset/a267a451a7a4/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:41 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:41 -0700 (PDT) Subject: [pypy-commit] pypy TreeStain/fixed-typo-line-29-mostly-to-most-1484469416419: close merged branch Message-ID: <595f3559.6c8fdf0a.b44ac.de1d@mx.google.com> Author: Matti Picus Branch: TreeStain/fixed-typo-line-29-mostly-to-most-1484469416419 Changeset: r91733:5fcbdc6c7f51 Date: 2017-07-07 10:01 +0300 http://bitbucket.org/pypy/pypy/changeset/5fcbdc6c7f51/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:47 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:47 -0700 (PDT) Subject: [pypy-commit] pypy issue2444: close merged branch Message-ID: <595f355f.6facdf0a.31d67.67db@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r91735:cf4cdb87e76d Date: 2017-07-07 10:02 +0300 http://bitbucket.org/pypy/pypy/changeset/cf4cdb87e76d/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:50 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:50 -0700 (PDT) Subject: [pypy-commit] pypy vendor/stdlib-3.6: close merged branch Message-ID: <595f3562.8292df0a.13fc7.8ec2@mx.google.com> Author: Matti Picus Branch: vendor/stdlib-3.6 Changeset: r91736:93901e5d8602 Date: 2017-07-07 10:04 +0300 http://bitbucket.org/pypy/pypy/changeset/93901e5d8602/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:46 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:46 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-from2: close merged branch Message-ID: <595f355e.2b97df0a.8f6f5.4b20@mx.google.com> Author: Matti Picus Branch: cpyext-from2 
Changeset: r91734:c456ec620126 Date: 2017-07-07 10:02 +0300 http://bitbucket.org/pypy/pypy/changeset/c456ec620126/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:53 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:53 -0700 (PDT) Subject: [pypy-commit] pypy better-PyDict_Next: close merged branch Message-ID: <595f3565.05b61c0a.c1682.2803@mx.google.com> Author: Matti Picus Branch: better-PyDict_Next Changeset: r91737:04c94a09cc35 Date: 2017-07-07 10:05 +0300 http://bitbucket.org/pypy/pypy/changeset/04c94a09cc35/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:55 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:55 -0700 (PDT) Subject: [pypy-commit] pypy cling-support: close merged branch Message-ID: <595f3567.d61b1c0a.6739b.54c6@mx.google.com> Author: Matti Picus Branch: cling-support Changeset: r91738:ac2a3ef1fc6f Date: 2017-07-07 10:06 +0300 http://bitbucket.org/pypy/pypy/changeset/ac2a3ef1fc6f/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:16:56 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:16:56 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-ssl: close merged branch Message-ID: <595f3568.82961c0a.fea2b.f5c2@mx.google.com> Author: Matti Picus Branch: py3.5-ssl Changeset: r91739:70580517bb71 Date: 2017-07-07 10:06 +0300 http://bitbucket.org/pypy/pypy/changeset/70580517bb71/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:17:02 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:02 -0700 (PDT) Subject: [pypy-commit] pypy fix-struct-unpack-Q: close merged branch Message-ID: <595f356e.d0091c0a.44b4.e5a7@mx.google.com> Author: Matti Picus Branch: fix-struct-unpack-Q Changeset: r91741:ae249b0608aa Date: 2017-07-07 10:08 +0300 http://bitbucket.org/pypy/pypy/changeset/ae249b0608aa/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:17:04 2017 From: pypy.commits at 
gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:04 -0700 (PDT) Subject: [pypy-commit] pypy newinitwarn: close merged branch Message-ID: <595f3570.6380df0a.bcb06.d982@mx.google.com> Author: Matti Picus Branch: newinitwarn Changeset: r91742:22a5c7b715a7 Date: 2017-07-07 10:09 +0300 http://bitbucket.org/pypy/pypy/changeset/22a5c7b715a7/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:17:06 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:06 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface2: close merged branch Message-ID: <595f3572.415f1c0a.2d5c8.dca8@mx.google.com> Author: Matti Picus Branch: buffer-interface2 Changeset: r91743:b78a87fd6f9e Date: 2017-07-07 10:09 +0300 http://bitbucket.org/pypy/pypy/changeset/b78a87fd6f9e/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:17:00 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:00 -0700 (PDT) Subject: [pypy-commit] pypy issue2446: close merged branch Message-ID: <595f356c.0f9ddf0a.1614.909e@mx.google.com> Author: Matti Picus Branch: issue2446 Changeset: r91740:e11180e99164 Date: 2017-07-07 10:07 +0300 http://bitbucket.org/pypy/pypy/changeset/e11180e99164/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:17:08 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:08 -0700 (PDT) Subject: [pypy-commit] pypy Tiberiumk/fix-2412-1476011166874: close merged branch Message-ID: <595f3574.c8da1c0a.37125.ea90@mx.google.com> Author: Matti Picus Branch: Tiberiumk/fix-2412-1476011166874 Changeset: r91744:8b2ca7fb64c5 Date: 2017-07-07 10:10 +0300 http://bitbucket.org/pypy/pypy/changeset/8b2ca7fb64c5/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:17:10 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:10 -0700 (PDT) Subject: [pypy-commit] pypy better-error-missing-self: close merged branch Message-ID: <595f3576.010d1c0a.711d8.0f6d@mx.google.com> 
Author: Matti Picus Branch: better-error-missing-self Changeset: r91745:76e20d2341d7 Date: 2017-07-07 10:10 +0300 http://bitbucket.org/pypy/pypy/changeset/76e20d2341d7/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:17:13 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:13 -0700 (PDT) Subject: [pypy-commit] pypy faulthandler: close merged branch Message-ID: <595f3579.0ca6df0a.d9014.1767@mx.google.com> Author: Matti Picus Branch: faulthandler Changeset: r91746:e1ce0b024b58 Date: 2017-07-07 10:11 +0300 http://bitbucket.org/pypy/pypy/changeset/e1ce0b024b58/ Log: close merged branch From pypy.commits at gmail.com Fri Jul 7 03:17:15 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:15 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head a25cad08ac53 on branch cpyext-obj-stealing Message-ID: <595f357b.0ca8df0a.af2c1.2855@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91747:f667d97fd5b5 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/f667d97fd5b5/ Log: Merge closed head a25cad08ac53 on branch cpyext-obj-stealing From pypy.commits at gmail.com Fri Jul 7 03:17:17 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:17 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 812bc6ea7285 on branch ctypes_char_indexing Message-ID: <595f357d.ce1b1c0a.8f6bb.9fbf@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91748:ff6dc54f025c Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/ff6dc54f025c/ Log: Merge closed head 812bc6ea7285 on branch ctypes_char_indexing From pypy.commits at gmail.com Fri Jul 7 03:17:19 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:19 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3bc27f05c67f on branch jumbojet Message-ID: <595f357f.a198df0a.46f6f.229a@mx.google.com> Author: Matti 
Picus Branch: closed-branches Changeset: r91749:3a352724c0c4 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/3a352724c0c4/ Log: Merge closed head 3bc27f05c67f on branch jumbojet From pypy.commits at gmail.com Fri Jul 7 03:17:21 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:21 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head ae256800825b on branch PyPy_profopt_enabled Message-ID: <595f3581.cc8edf0a.d0254.eb48@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91750:4cfc3fa2b229 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/4cfc3fa2b229/ Log: Merge closed head ae256800825b on branch PyPy_profopt_enabled From pypy.commits at gmail.com Fri Jul 7 03:17:23 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:23 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 03cc665a7161 on branch Kounavi/fix-typo-depricate-to-deprecate-p-1495624547235 Message-ID: <595f3583.d696df0a.26fd5.0d12@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91751:9f7b7fe8d657 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/9f7b7fe8d657/ Log: Merge closed head 03cc665a7161 on branch Kounavi/fix-typo-depricate- to-deprecate-p-1495624547235 From pypy.commits at gmail.com Fri Jul 7 03:17:25 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:25 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 11a147bb5f8c on branch sockopt_zero Message-ID: <595f3585.cc8edf0a.d0254.eb51@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91752:e74d37268068 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/e74d37268068/ Log: Merge closed head 11a147bb5f8c on branch sockopt_zero From pypy.commits at gmail.com Fri Jul 7 03:17:26 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:26 -0700 (PDT) Subject: 
[pypy-commit] pypy closed-branches: Merge closed head 43487a2e3f36 on branch pypy_ctypes_nosegfault_nofastpath Message-ID: <595f3586.e7a9df0a.c4032.bf00@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91753:b6e9b005197b Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/b6e9b005197b/ Log: Merge closed head 43487a2e3f36 on branch pypy_ctypes_nosegfault_nofastpath From pypy.commits at gmail.com Fri Jul 7 03:17:28 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:28 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head ec0618c52b09 on branch cpyext-recursionlimit Message-ID: <595f3588.d696df0a.26fd5.0d1a@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91754:e0c207c40a72 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/e0c207c40a72/ Log: Merge closed head ec0618c52b09 on branch cpyext-recursionlimit From pypy.commits at gmail.com Fri Jul 7 03:17:30 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:30 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 7660bfc93db9 on branch sthalik/fix-signed-integer-sizes-1494493539409 Message-ID: <595f358a.c4921c0a.cf981.1d10@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91755:af8c72efd79d Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/af8c72efd79d/ Log: Merge closed head 7660bfc93db9 on branch sthalik/fix-signed-integer- sizes-1494493539409 From pypy.commits at gmail.com Fri Jul 7 03:17:32 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:32 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 4113f4687b53 on branch vmprof-0.4.4 Message-ID: <595f358c.cc331c0a.26b4e.4c7b@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91756:a8aea21f86eb Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/a8aea21f86eb/ Log: Merge closed head 
4113f4687b53 on branch vmprof-0.4.4 From pypy.commits at gmail.com Fri Jul 7 03:17:34 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:34 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 8605f3ef66d1 on branch vmprof-native Message-ID: <595f358e.d0141c0a.c34da.8f84@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91757:df6435aad18e Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/df6435aad18e/ Log: Merge closed head 8605f3ef66d1 on branch vmprof-native From pypy.commits at gmail.com Fri Jul 7 03:17:36 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:36 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 7b37a51ac77c on branch jumbo Message-ID: <595f3590.c68b1c0a.b336f.3b30@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91758:323e7b202316 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/323e7b202316/ Log: Merge closed head 7b37a51ac77c on branch jumbo From pypy.commits at gmail.com Fri Jul 7 03:17:37 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:37 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 4c4bd0be2674 on branch fniephaus/fix-typo-1488123166752 Message-ID: <595f3591.16a5df0a.fdff0.d802@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91759:8fd4733d41fe Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/8fd4733d41fe/ Log: Merge closed head 4c4bd0be2674 on branch fniephaus/fix- typo-1488123166752 From pypy.commits at gmail.com Fri Jul 7 03:17:39 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:39 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head e8e1dca47708 on branch fix-global Message-ID: <595f3593.e9addf0a.93950.c9bd@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91760:758d118694af Date: 2017-07-07 10:12 
+0300 http://bitbucket.org/pypy/pypy/changeset/758d118694af/ Log: Merge closed head e8e1dca47708 on branch fix-global From pypy.commits at gmail.com Fri Jul 7 03:17:41 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:41 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 913a3db1147a on branch py3.5-fix-decimal-module-name Message-ID: <595f3595.150e1c0a.a20fa.408e@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91761:420260018180 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/420260018180/ Log: Merge closed head 913a3db1147a on branch py3.5-fix-decimal-module- name From pypy.commits at gmail.com Fri Jul 7 03:17:43 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:43 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head bb7cbf013b8c on branch py3.5-fix-globals Message-ID: <595f3597.81581c0a.26bd9.33db@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91762:303ac95dd6d1 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/303ac95dd6d1/ Log: Merge closed head bb7cbf013b8c on branch py3.5-fix-globals From pypy.commits at gmail.com Fri Jul 7 03:17:45 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:45 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 25c88f3134cc on branch optinfo-into-bridges-3 Message-ID: <595f3599.05b61c0a.c1682.284b@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91763:e4cdf47f9b43 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/e4cdf47f9b43/ Log: Merge closed head 25c88f3134cc on branch optinfo-into-bridges-3 From pypy.commits at gmail.com Fri Jul 7 03:17:48 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:48 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head b2ea7e2e75b7 on branch fix_bool_restype Message-ID: 
<595f359c.c4001c0a.1f5ea.c0b6@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91765:602065614f03 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/602065614f03/ Log: Merge closed head b2ea7e2e75b7 on branch fix_bool_restype From pypy.commits at gmail.com Fri Jul 7 03:17:50 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:50 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 8fde52c4931f on branch vendor/stdlib-3.5 Message-ID: <595f359e.c5d51c0a.df10b.07ea@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91766:06bfae97b0ac Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/06bfae97b0ac/ Log: Merge closed head 8fde52c4931f on branch vendor/stdlib-3.5 From pypy.commits at gmail.com Fri Jul 7 03:17:51 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:51 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head a267a451a7a4 on branch TreeStain/main-lines-changed-in-l77-l83-made-para-1484471558033 Message-ID: <595f359f.3486df0a.694d7.3fc8@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91767:3ab812a02cac Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/3ab812a02cac/ Log: Merge closed head a267a451a7a4 on branch TreeStain/main-lines- changed-in-l77-l83-made-para-1484471558033 From pypy.commits at gmail.com Fri Jul 7 03:17:46 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:46 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 7a4a9d28b154 on branch space-wrap Message-ID: <595f359a.c5d51c0a.df10b.07e5@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91764:10adf6640ad7 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/10adf6640ad7/ Log: Merge closed head 7a4a9d28b154 on branch space-wrap From pypy.commits at gmail.com Fri Jul 7 03:17:53 2017 From: pypy.commits at 
gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:53 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 5fcbdc6c7f51 on branch TreeStain/fixed-typo-line-29-mostly-to-most-1484469416419 Message-ID: <595f35a1.ce1b1c0a.8f6bb.9fee@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91768:53fe68493237 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/53fe68493237/ Log: Merge closed head 5fcbdc6c7f51 on branch TreeStain/fixed-typo- line-29-mostly-to-most-1484469416419 From pypy.commits at gmail.com Fri Jul 7 03:17:55 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:55 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head c456ec620126 on branch cpyext-from2 Message-ID: <595f35a3.14b7df0a.11624.92fb@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91769:7c3b4bf0e44c Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/7c3b4bf0e44c/ Log: Merge closed head c456ec620126 on branch cpyext-from2 From pypy.commits at gmail.com Fri Jul 7 03:17:56 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:56 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head cf4cdb87e76d on branch issue2444 Message-ID: <595f35a4.ec9adf0a.ec62c.32c8@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91770:f650a9b140d2 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/f650a9b140d2/ Log: Merge closed head cf4cdb87e76d on branch issue2444 From pypy.commits at gmail.com Fri Jul 7 03:17:58 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:17:58 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 93901e5d8602 on branch vendor/stdlib-3.6 Message-ID: <595f35a6.925b1c0a.9c214.2c21@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91771:86d2d0611fc0 Date: 2017-07-07 10:12 +0300 
http://bitbucket.org/pypy/pypy/changeset/86d2d0611fc0/ Log: Merge closed head 93901e5d8602 on branch vendor/stdlib-3.6 From pypy.commits at gmail.com Fri Jul 7 03:18:00 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:00 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 04c94a09cc35 on branch better-PyDict_Next Message-ID: <595f35a8.42081c0a.f5da6.4b86@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91772:8b96f66f1962 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/8b96f66f1962/ Log: Merge closed head 04c94a09cc35 on branch better-PyDict_Next From pypy.commits at gmail.com Fri Jul 7 03:18:02 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:02 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head ac2a3ef1fc6f on branch cling-support Message-ID: <595f35aa.06b71c0a.82097.053e@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91773:4845b3ebb2cd Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/4845b3ebb2cd/ Log: Merge closed head ac2a3ef1fc6f on branch cling-support From pypy.commits at gmail.com Fri Jul 7 03:18:04 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:04 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 70580517bb71 on branch py3.5-ssl Message-ID: <595f35ac.c288df0a.97150.cce7@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91774:4b316352d0eb Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/4b316352d0eb/ Log: Merge closed head 70580517bb71 on branch py3.5-ssl From pypy.commits at gmail.com Fri Jul 7 03:18:05 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:05 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head e11180e99164 on branch issue2446 Message-ID: <595f35ad.c4921c0a.cf981.1d3c@mx.google.com> Author: Matti Picus Branch: 
closed-branches Changeset: r91775:513c7bd67193 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/513c7bd67193/ Log: Merge closed head e11180e99164 on branch issue2446 From pypy.commits at gmail.com Fri Jul 7 03:18:07 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:07 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head ae249b0608aa on branch fix-struct-unpack-Q Message-ID: <595f35af.d399df0a.6e8dc.ed25@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91776:957882d1662d Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/957882d1662d/ Log: Merge closed head ae249b0608aa on branch fix-struct-unpack-Q From pypy.commits at gmail.com Fri Jul 7 03:18:09 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:09 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 22a5c7b715a7 on branch newinitwarn Message-ID: <595f35b1.89e01c0a.6a6f4.b3ab@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91777:26c4e772f138 Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/26c4e772f138/ Log: Merge closed head 22a5c7b715a7 on branch newinitwarn From pypy.commits at gmail.com Fri Jul 7 03:18:11 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:11 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head b78a87fd6f9e on branch buffer-interface2 Message-ID: <595f35b3.95061c0a.16843.fa84@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91778:da28035fc98e Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/da28035fc98e/ Log: Merge closed head b78a87fd6f9e on branch buffer-interface2 From pypy.commits at gmail.com Fri Jul 7 03:18:13 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:13 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 8b2ca7fb64c5 on branch 
Tiberiumk/fix-2412-1476011166874 Message-ID: <595f35b5.c33e1c0a.f62b7.714c@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91779:b6bae261111c Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/b6bae261111c/ Log: Merge closed head 8b2ca7fb64c5 on branch Tiberiumk/fix-2412-1476011166874 From pypy.commits at gmail.com Fri Jul 7 03:18:15 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:15 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 76e20d2341d7 on branch better-error-missing-self Message-ID: <595f35b7.82961c0a.fea2b.f5fe@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91780:2f72d0f48d1b Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/2f72d0f48d1b/ Log: Merge closed head 76e20d2341d7 on branch better-error-missing-self From pypy.commits at gmail.com Fri Jul 7 03:18:17 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:17 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head e1ce0b024b58 on branch faulthandler Message-ID: <595f35b9.8ea3df0a.4d66.b02f@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91781:e63c989b068e Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/e63c989b068e/ Log: Merge closed head e1ce0b024b58 on branch faulthandler From pypy.commits at gmail.com Fri Jul 7 03:18:19 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:18:19 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: re-close this branch Message-ID: <595f35bb.a1abdf0a.abeb5.6f57@mx.google.com> Author: Matti Picus Branch: closed-branches Changeset: r91782:4ef4754b8add Date: 2017-07-07 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/4ef4754b8add/ Log: re-close this branch From pypy.commits at gmail.com Fri Jul 7 03:51:50 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:51:50 -0700 (PDT) Subject: [pypy-commit] pypy 
cpyext-hash_notimpl: close branch to be merged Message-ID: <595f3d96.c4da1c0a.21be4.1d8a@mx.google.com> Author: Matti Picus Branch: cpyext-hash_notimpl Changeset: r91783:8d6a71064d34 Date: 2017-07-07 10:46 +0300 http://bitbucket.org/pypy/pypy/changeset/8d6a71064d34/ Log: close branch to be merged From pypy.commits at gmail.com Fri Jul 7 03:51:52 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 00:51:52 -0700 (PDT) Subject: [pypy-commit] pypy default: merge branch to set __hash__ to None if tp_hash is PyObject_HashNotImplemented Message-ID: <595f3d98.810f1c0a.9a24f.85c0@mx.google.com> Author: Matti Picus Branch: Changeset: r91784:28c29133a557 Date: 2017-07-07 10:50 +0300 http://bitbucket.org/pypy/pypy/changeset/28c29133a557/ Log: merge branch to set __hash__ to None if tp_hash is PyObject_HashNotImplemented diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -21,3 +21,7 @@ .. branch: issue-2592 CPyext PyListObject.pop must return the value + +.. 
branch: cpyext-hash_notimpl + +If ``tp_hash`` is ``PyObject_HashNotImplemented``, set ``obj.__dict__['__hash__']`` to None diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1347,3 +1347,33 @@ Bsize = module.get_basicsize(B) assert Asize == Bsize assert Asize > basesize + + +class AppTestHashable(AppTestCpythonExtensionBase): + def test_unhashable(self): + if not self.runappdirect: + skip('pointer to function equality available' + ' only after translation') + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + ''' + PyObject *obj; + obj = PyObject_New(PyObject, &Foo_Type); + return obj; + ''' + )], prologue=''' + static PyTypeObject Foo_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo", + }; + ''', more_init = ''' + Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT; + Foo_Type.tp_hash = PyObject_HashNotImplemented; + if (PyType_Ready(&Foo_Type) < 0) INITERROR; + ''') + obj = module.new_obj() + raises(TypeError, hash, obj) + assert type(obj).__dict__['__hash__'] is None + # this is equivalent to + from collections import Hashable + assert not isinstance(obj, Hashable) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -338,13 +338,19 @@ setattr(struct, slot_names[1], slot_func_helper) def add_operators(space, dict_w, pto): - # XXX support PyObject_HashNotImplemented + from pypy.module.cpyext.object import PyObject_HashNotImplemented + hash_not_impl = PyObject_HashNotImplemented.api_func.get_llhelper(space) for method_name, slot_names, wrapper_func, wrapper_func_kwds, doc in slotdefs_for_wrappers: if method_name in dict_w: continue offset = [rffi.offsetof(lltype.typeOf(pto).TO, slot_names[0])] if len(slot_names) == 1: func = getattr(pto, slot_names[0]) + if slot_names[0] == 'c_tp_hash': + if 
hash_not_impl == func: + # special case for tp_hash == PyObject_HashNotImplemented + dict_w[method_name] = space.w_None + continue else: assert len(slot_names) == 2 struct = getattr(pto, slot_names[0]) From pypy.commits at gmail.com Fri Jul 7 08:03:10 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 07 Jul 2017 05:03:10 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-add_newdoc: merge default into branch Message-ID: <595f787e.5e361c0a.905a7.b793@mx.google.com> Author: Matti Picus Branch: cpyext-add_newdoc Changeset: r91785:b2f03ffd8457 Date: 2017-07-07 10:53 +0300 http://bitbucket.org/pypy/pypy/changeset/b2f03ffd8457/ Log: merge default into branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -21,3 +21,7 @@ .. branch: issue-2592 CPyext PyListObject.pop must return the value + +.. branch: cpyext-hash_notimpl + +If ``tp_hash`` is ``PyObject_HashNotImplemented``, set ``obj.__dict__['__hash__']`` to None diff --git a/pypy/module/_cffi_backend/errorbox.py b/pypy/module/_cffi_backend/errorbox.py --- a/pypy/module/_cffi_backend/errorbox.py +++ b/pypy/module/_cffi_backend/errorbox.py @@ -86,8 +86,6 @@ return w_text = self.space.call_function(w_done) - # XXX Python 3: MessageBoxA() => MessageBoxW() - p = rffi.str2charp(self.space.bytes_w(w_text), track_allocation=False) if self.text_p: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -456,13 +456,15 @@ return decorate def api_func_from_cdef(func, cdef, cts, - error=_NOT_SPECIFIED, header=DEFAULT_HEADER): + error=_NOT_SPECIFIED, header=DEFAULT_HEADER, + result_is_ll=False): func._always_inline_ = 'try' cdecl = cts.parse_func(cdef) RESULT = cdecl.get_llresult(cts) api_function = ApiFunction( cdecl.get_llargs(cts), RESULT, func, - error=_compute_error(error, RESULT), cdecl=cdecl) + error=_compute_error(error, RESULT), cdecl=cdecl, + 
result_is_ll=result_is_ll) FUNCTIONS_BY_HEADER[header][cdecl.name] = api_function unwrapper = api_function.get_unwrapper() unwrapper.func = func @@ -656,10 +658,12 @@ class CpyextTypeSpace(CTypeSpace): - def decl(self, cdef, error=_NOT_SPECIFIED, header=DEFAULT_HEADER): + def decl(self, cdef, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, + result_is_ll=False): def decorate(func): return api_func_from_cdef( - func, cdef, self, error=error, header=header) + func, cdef, self, error=error, header=header, + result_is_ll=result_is_ll) return decorate diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,10 +1,171 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib import rgc # Force registration of gc.collect +from rpython.rlib.buffer import RawBuffer from pypy.interpreter.error import oefmt +from pypy.interpreter.buffer import BufferView from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, cts, Py_buffer, - Py_ssize_t, Py_ssize_tP, generic_cpy_call, + cpython_api, Py_buffer, Py_ssize_t, Py_ssize_tP, CONST_STRINGP, cts, + generic_cpy_call, PyBUF_WRITABLE, PyBUF_FORMAT, PyBUF_ND, PyBUF_STRIDES) -from pypy.module.cpyext.pyobject import PyObject, Py_IncRef +from pypy.module.cpyext.typeobjectdefs import releasebufferproc +from pypy.module.cpyext.pyobject import PyObject, incref, decref, as_pyobj + +class CBuffer(RawBuffer): + _immutable_ = True + def __init__(self, view): + self.view = view + self.readonly = view.readonly + + def getlength(self): + return self.view.getlength() + + def getitem(self, index): + return self.view.ptr[index] + + def getslice(self, start, stop, step, size): + assert step == 1 + assert stop - start == size + ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), start) + return rffi.charpsize2str(ptr, size) + + def setitem(self, index, char): + self.view.ptr[index] = char + + def setslice(self, index, s): + assert s is not 
None + ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), index) + rffi.str2chararray(s, ptr, len(s)) + + def get_raw_address(self): + return cts.cast('char *', self.view.ptr) + +class CPyBuffer(BufferView): + # Similar to Py_buffer + _immutable_ = True + + def __init__(self, space, ptr, size, w_obj, format='B', shape=None, + strides=None, ndim=1, itemsize=1, readonly=True, + needs_decref=False, + releasebufferproc=rffi.cast(rffi.VOIDP, 0)): + self.space = space + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + self.pyobj = as_pyobj(space, w_obj) + self.format = format + self.ndim = ndim + self.itemsize = itemsize + + if not shape: + self.shape = [size] + else: + self.shape = shape + if not strides: + self.strides = [1] + else: + self.strides = strides + self.readonly = readonly + self.needs_decref = needs_decref + self.releasebufferproc = releasebufferproc + + def releasebuffer(self): + if self.pyobj: + if self.needs_decref: + if self.releasebufferproc: + func_target = rffi.cast(releasebufferproc, self.releasebufferproc) + with lltype.scoped_alloc(Py_buffer) as pybuf: + pybuf.c_buf = self.ptr + pybuf.c_len = self.size + pybuf.c_ndim = cts.cast('int', self.ndim) + pybuf.c_shape = cts.cast('Py_ssize_t*', pybuf.c__shape) + pybuf.c_strides = cts.cast('Py_ssize_t*', pybuf.c__strides) + for i in range(self.ndim): + pybuf.c_shape[i] = self.shape[i] + pybuf.c_strides[i] = self.strides[i] + if self.format: + pybuf.c_format = rffi.str2charp(self.format) + else: + pybuf.c_format = rffi.str2charp("B") + generic_cpy_call(self.space, func_target, self.pyobj, pybuf) + decref(self.space, self.pyobj) + self.pyobj = lltype.nullptr(PyObject.TO) + else: + #do not call twice + return + + def getlength(self): + return self.size + + def getbytes(self, start, size): + return ''.join([self.ptr[i] for i in range(start, start + size)]) + + def setbytes(self, start, string): + # absolutely no safety checks, what could go wrong? 
+ for i in range(len(string)): + self.ptr[start + i] = string[i] + + def as_str(self): + return CBuffer(self).as_str() + + def as_readbuf(self): + return CBuffer(self) + + def as_writebuf(self): + assert not self.readonly + return CBuffer(self) + + def get_raw_address(self): + return rffi.cast(rffi.CCHARP, self.ptr) + + def getformat(self): + return self.format + + def getshape(self): + return self.shape + + def getstrides(self): + return self.strides + + def getitemsize(self): + return self.itemsize + + def getndim(self): + return self.ndim + +class FQ(rgc.FinalizerQueue): + Class = CPyBuffer + def finalizer_trigger(self): + while 1: + buf = self.next_dead() + if not buf: + break + buf.releasebuffer() + +fq = FQ() + + + at cpython_api([PyObject, CONST_STRINGP, Py_ssize_tP], rffi.INT_real, error=-1) +def PyObject_AsCharBuffer(space, obj, bufferp, sizep): + """Returns a pointer to a read-only memory location usable as + character-based input. The obj argument must support the single-segment + character buffer interface. On success, returns 0, sets buffer to the + memory location and size to the buffer length. Returns -1 and sets a + TypeError on error. 
+ """ + pto = obj.c_ob_type + pb = pto.c_tp_as_buffer + if not (pb and pb.c_bf_getreadbuffer and pb.c_bf_getsegcount): + raise oefmt(space.w_TypeError, "expected a character buffer object") + if generic_cpy_call(space, pb.c_bf_getsegcount, + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: + raise oefmt(space.w_TypeError, + "expected a single-segment buffer object") + size = generic_cpy_call(space, pb.c_bf_getcharbuffer, + obj, 0, bufferp) + if size < 0: + return -1 + sizep[0] = size + return 0 @cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t, lltype.Signed, lltype.Signed], rffi.INT, error=-1) @@ -20,7 +181,7 @@ view.c_len = length view.c_obj = obj if obj: - Py_IncRef(space, obj) + incref(space, obj) view.c_itemsize = 1 rffi.setintfield(view, 'c_readonly', readonly) rffi.setintfield(view, 'c_ndim', 1) diff --git a/pypy/module/cpyext/include/memoryobject.h b/pypy/module/cpyext/include/memoryobject.h --- a/pypy/module/cpyext/include/memoryobject.h +++ b/pypy/module/cpyext/include/memoryobject.h @@ -5,14 +5,7 @@ extern "C" { #endif -/* The struct is declared here but it shouldn't - be considered public. Don't access those fields directly, - use the functions instead! */ -typedef struct { - PyObject_HEAD - Py_buffer view; -} PyMemoryViewObject; - +#include "cpyext_memoryobject.h" /* Get a pointer to the memoryview's private copy of the exporter's buffer. 
*/ #define PyMemoryView_GET_BUFFER(op) (&((PyMemoryViewObject *)(op))->view) diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,29 +1,22 @@ -from rpython.rlib.objectmodel import keepalive_until_here -from pypy.interpreter.error import oefmt from pypy.module.cpyext.api import ( - cpython_api, Py_buffer, CANNOT_FAIL, Py_MAX_FMT, Py_MAX_NDIMS, - build_type_checkers, Py_ssize_tP, PyObjectFields, cpython_struct, - bootstrap_function, Py_bufferP, slot_function, generic_cpy_call) + cpython_api, CANNOT_FAIL, Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, + Py_ssize_tP, cts, parse_dir, bootstrap_function, Py_bufferP, slot_function) from pypy.module.cpyext.pyobject import ( - PyObject, make_ref, as_pyobj, decref, from_ref, make_typedescr, + PyObject, make_ref, decref, from_ref, make_typedescr, get_typedescr, track_reference) from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import widen from pypy.objspace.std.memoryobject import W_MemoryView from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.import_ import PyImport_Import +from pypy.module.cpyext.buffer import CPyBuffer, fq + +cts.parse_header(parse_dir / 'cpyext_memoryobject.h') +PyMemoryViewObject = cts.gettype('PyMemoryViewObject*') PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView") -PyMemoryViewObjectStruct = lltype.ForwardReference() -PyMemoryViewObject = lltype.Ptr(PyMemoryViewObjectStruct) -PyMemoryViewObjectFields = PyObjectFields + \ - (("view", Py_buffer),) -cpython_struct( - "PyMemoryViewObject", PyMemoryViewObjectFields, PyMemoryViewObjectStruct, - level=2) - @bootstrap_function def init_memoryobject(space): "Type description of PyDictObject" @@ -32,7 +25,7 @@ attach=memory_attach, dealloc=memory_dealloc, realize=memory_realize, - ) + ) def memory_attach(space, py_obj, w_obj, w_userdata=None): """ @@ -61,7 +54,6 
@@ """ Creates the memory object in the interpreter """ - from pypy.module.cpyext.slotdefs import CPyBuffer, fq py_mem = rffi.cast(PyMemoryViewObject, obj) view = py_mem.c_view ndim = widen(view.c_ndim) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -3,7 +3,7 @@ cpython_api, generic_cpy_call, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, PyVarObject, size_t, slot_function, Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, - Py_GE, CONST_STRING, CONST_STRINGP, FILEP, fwrite) + Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, from_ref, Py_IncRef, Py_DecRef, get_typedescr) @@ -432,30 +432,6 @@ is active then NULL is returned but PyErr_Occurred() will return false.""" return space.call_function(space.builtin.get('dir'), w_o) - at cpython_api([PyObject, CONST_STRINGP, Py_ssize_tP], rffi.INT_real, error=-1) -def PyObject_AsCharBuffer(space, obj, bufferp, sizep): - """Returns a pointer to a read-only memory location usable as - character-based input. The obj argument must support the single-segment - character buffer interface. On success, returns 0, sets buffer to the - memory location and size to the buffer length. Returns -1 and sets a - TypeError on error. - """ - pto = obj.c_ob_type - - pb = pto.c_tp_as_buffer - if not (pb and pb.c_bf_getreadbuffer and pb.c_bf_getsegcount): - raise oefmt(space.w_TypeError, "expected a character buffer object") - if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: - raise oefmt(space.w_TypeError, - "expected a single-segment buffer object") - size = generic_cpy_call(space, pb.c_bf_getcharbuffer, - obj, 0, bufferp) - if size < 0: - return -1 - sizep[0] = size - return 0 - # Also in include/object.h Py_PRINT_RAW = 1 # No string quotes etc. 
diff --git a/pypy/module/cpyext/parse/cpyext_memoryobject.h b/pypy/module/cpyext/parse/cpyext_memoryobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/parse/cpyext_memoryobject.h @@ -0,0 +1,7 @@ +/* The struct is declared here but it shouldn't + be considered public. Don't access those fields directly, + use the functions instead! */ +typedef struct { + PyObject_HEAD + Py_buffer view; +} PyMemoryViewObject; diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,7 +4,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.rarithmetic import widen -from rpython.rlib import rgc # Force registration of gc.collect from pypy.module.cpyext.api import ( slot_function, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, Py_buffer, Py_bufferP, PyTypeObjectPtr, cts) @@ -13,22 +12,20 @@ getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, - readbufferproc, getbufferproc, releasebufferproc, ssizessizeobjargproc) -from pypy.module.cpyext.pyobject import make_ref, decref, as_pyobj, from_ref + readbufferproc, getbufferproc, ssizessizeobjargproc) +from pypy.module.cpyext.pyobject import make_ref, decref, from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.memoryobject import fill_Py_buffer from pypy.module.cpyext.state import State from pypy.module.cpyext import userslot -from pypy.interpreter.buffer import BufferView +from pypy.module.cpyext.buffer import CBuffer, CPyBuffer, fq from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.argument import Arguments -from rpython.rlib.buffer import RawBuffer from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize, not_rpython 
from rpython.tool.sourcetools import func_renamer from rpython.flowspace.model import Constant from rpython.flowspace.specialcase import register_flow_sc -from rpython.rtyper.annlowlevel import llhelper from pypy.module.sys.version import CPYTHON_VERSION PY3 = CPYTHON_VERSION[0] == 3 @@ -324,141 +321,6 @@ space.fromcache(State).check_and_raise_exception(always=True) return space.newint(res) -class CPyBuffer(BufferView): - # Similar to Py_buffer - _immutable_ = True - - def __init__(self, space, ptr, size, w_obj, format='B', shape=None, - strides=None, ndim=1, itemsize=1, readonly=True, - needs_decref=False, - releasebufferproc=rffi.cast(rffi.VOIDP, 0)): - self.space = space - self.ptr = ptr - self.size = size - self.w_obj = w_obj # kept alive - self.pyobj = as_pyobj(space, w_obj) - self.format = format - self.ndim = ndim - self.itemsize = itemsize - - if not shape: - self.shape = [size] - else: - self.shape = shape - if not strides: - self.strides = [1] - else: - self.strides = strides - self.readonly = readonly - self.needs_decref = needs_decref - self.releasebufferproc = releasebufferproc - - def releasebuffer(self): - if self.pyobj: - if self.needs_decref: - if self.releasebufferproc: - func_target = rffi.cast(releasebufferproc, self.releasebufferproc) - with lltype.scoped_alloc(Py_buffer) as pybuf: - pybuf.c_buf = self.ptr - pybuf.c_len = self.size - pybuf.c_ndim = cts.cast('int', self.ndim) - pybuf.c_shape = cts.cast('Py_ssize_t*', pybuf.c__shape) - pybuf.c_strides = cts.cast('Py_ssize_t*', pybuf.c__strides) - for i in range(self.ndim): - pybuf.c_shape[i] = self.shape[i] - pybuf.c_strides[i] = self.strides[i] - if self.format: - pybuf.c_format = rffi.str2charp(self.format) - else: - pybuf.c_format = rffi.str2charp("B") - generic_cpy_call(self.space, func_target, self.pyobj, pybuf) - decref(self.space, self.pyobj) - self.pyobj = lltype.nullptr(PyObject.TO) - else: - #do not call twice - return - - def getlength(self): - return self.size - - def getbytes(self, 
start, size): - return ''.join([self.ptr[i] for i in range(start, start + size)]) - - def setbytes(self, start, string): - # absolutely no safety checks, what could go wrong? - for i in range(len(string)): - self.ptr[start + i] = string[i] - - def as_str(self): - return CBuffer(self).as_str() - - def as_readbuf(self): - return CBuffer(self) - - def as_writebuf(self): - assert not self.readonly - return CBuffer(self) - - def get_raw_address(self): - return rffi.cast(rffi.CCHARP, self.ptr) - - def getformat(self): - return self.format - - def getshape(self): - return self.shape - - def getstrides(self): - return self.strides - - def getitemsize(self): - return self.itemsize - - def getndim(self): - return self.ndim - -class FQ(rgc.FinalizerQueue): - Class = CPyBuffer - def finalizer_trigger(self): - while 1: - buf = self.next_dead() - if not buf: - break - buf.releasebuffer() - -fq = FQ() - - -class CBuffer(RawBuffer): - _immutable_ = True - def __init__(self, view): - self.view = view - self.readonly = view.readonly - - def getlength(self): - return self.view.getlength() - - def getitem(self, index): - return self.view.ptr[index] - - def getslice(self, start, stop, step, size): - assert step == 1 - assert stop - start == size - ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), start) - return rffi.charpsize2str(ptr, size) - - def setitem(self, index, char): - self.view.ptr[index] = char - - def setslice(self, index, s): - assert s is not None - ptr = rffi.ptradd(cts.cast('char *', self.view.ptr), index) - rffi.str2chararray(s, ptr, len(s)) - - def get_raw_address(self): - return cts.cast('char *', self.view.ptr) - - def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) py_obj = make_ref(space, w_self) diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -11,7 +11,7 @@ 
_PyString_Join) from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref -from pypy.module.cpyext.object import PyObject_AsCharBuffer +from pypy.module.cpyext.buffer import PyObject_AsCharBuffer from pypy.module.cpyext.api import PyTypeObjectPtr diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1425,3 +1425,31 @@ module.add_doc_string_method(a.foo_43) assert a.foo_43.__doc__ == "A docstring" +class AppTestHashable(AppTestCpythonExtensionBase): + def test_unhashable(self): + if not self.runappdirect: + skip('pointer to function equality available' + ' only after translation') + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + ''' + PyObject *obj; + obj = PyObject_New(PyObject, &Foo_Type); + return obj; + ''' + )], prologue=''' + static PyTypeObject Foo_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo", + }; + ''', more_init = ''' + Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT; + Foo_Type.tp_hash = PyObject_HashNotImplemented; + if (PyType_Ready(&Foo_Type) < 0) INITERROR; + ''') + obj = module.new_obj() + raises(TypeError, hash, obj) + assert type(obj).__dict__['__hash__'] is None + # this is equivalent to + from collections import Hashable + assert not isinstance(obj, Hashable) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -345,13 +345,19 @@ setattr(struct, slot_names[1], slot_func_helper) def add_operators(space, dict_w, pto): - # XXX support PyObject_HashNotImplemented + from pypy.module.cpyext.object import PyObject_HashNotImplemented + hash_not_impl = PyObject_HashNotImplemented.api_func.get_llhelper(space) for method_name, slot_names, wrapper_func, wrapper_func_kwds, doc in 
slotdefs_for_wrappers: if method_name in dict_w: continue offset = [rffi.offsetof(lltype.typeOf(pto).TO, slot_names[0])] if len(slot_names) == 1: func = getattr(pto, slot_names[0]) + if slot_names[0] == 'c_tp_hash': + if hash_not_impl == func: + # special case for tp_hash == PyObject_HashNotImplemented + dict_w[method_name] = space.w_None + continue else: assert len(slot_names) == 2 struct = getattr(pto, slot_names[0]) diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -58,6 +58,7 @@ SHARED.join('compat.c'), SHARED.join('machine.c'), SHARED.join('vmp_stack.c'), + SHARED.join('vmprof_main.c'), # symbol table already in separate_module_files ] + separate_module_files, post_include_bits=[], diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main.c b/rpython/rlib/rvmprof/src/shared/vmprof_main.c new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_main.c @@ -0,0 +1,30 @@ +#ifdef VMPROF_UNIX + +#include +/* value: LSB bit is 1 if signals must be ignored; all other bits + are a counter for how many threads are currently in a signal handler */ +static long volatile signal_handler_value = 1; + +void vmprof_ignore_signals(int ignored) +{ + if (!ignored) { + __sync_fetch_and_and(&signal_handler_value, ~1L); + } else { + /* set the last bit, and wait until concurrently-running signal + handlers finish */ + while (__sync_or_and_fetch(&signal_handler_value, 1L) != 1L) { + usleep(1); + } + } +} + +long vmprof_enter_signal(void) +{ + return __sync_fetch_and_add(&signal_handler_value, 2L); +} + +long vmprof_exit_signal(void) +{ + return __sync_sub_and_fetch(&signal_handler_value, 2L); +} +#endif diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main.h b/rpython/rlib/rvmprof/src/shared/vmprof_main.h --- a/rpython/rlib/rvmprof/src/shared/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/shared/vmprof_main.h @@ -60,25 +60,9 @@ 
/************************************************************/ -/* value: last bit is 1 if signals must be ignored; all other bits - are a counter for how many threads are currently in a signal handler */ -static long volatile signal_handler_value = 1; - -RPY_EXTERN -void vmprof_ignore_signals(int ignored) -{ - if (!ignored) { - __sync_fetch_and_and(&signal_handler_value, ~1L); - } - else { - /* set the last bit, and wait until concurrently-running signal - handlers finish */ - while (__sync_or_and_fetch(&signal_handler_value, 1L) != 1L) { - usleep(1); - } - } -} - +RPY_EXTERN void vmprof_ignore_signals(int ignored); +RPY_EXTERN long vmprof_enter_signal(void); +RPY_EXTERN long vmprof_exit_signal(void); /* ************************************************************* * functions to write a profile file compatible with gperftools @@ -276,7 +260,7 @@ __sync_lock_release(&spinlock); #endif - long val = __sync_fetch_and_add(&signal_handler_value, 2L); + long val = vmprof_enter_signal(); if ((val & 1) == 0) { int saved_errno = errno; @@ -307,7 +291,7 @@ errno = saved_errno; } - __sync_sub_and_fetch(&signal_handler_value, 2L); + vmprof_exit_signal(); } From pypy.commits at gmail.com Fri Jul 7 10:19:16 2017 From: pypy.commits at gmail.com (rlamy) Date: Fri, 07 Jul 2017 07:19:16 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Py_EnterRecursiveCall now raises RecursionError instead of generic RuntimeError (changed in 3.5) Message-ID: <595f9864.cdb0df0a.f6769.0b62@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91786:28fa40558660 Date: 2017-07-07 15:18 +0100 http://bitbucket.org/pypy/pypy/changeset/28fa40558660/ Log: Py_EnterRecursiveCall now raises RecursionError instead of generic RuntimeError (changed in 3.5) diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -261,12 +261,12 @@ global limit limit += 1 if limit > 10: - raise oefmt(space.w_RuntimeError, + raise 
oefmt(space.w_RecursionError, "maximum recursion depth exceeded%s", rffi.charp2str(where)) return 0 from rpython.rlib.rstack import stack_almost_full if stack_almost_full(): - raise oefmt(space.w_RuntimeError, + raise oefmt(space.w_RecursionError, "maximum recursion depth exceeded%s", rffi.charp2str(where)) return 0 diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -360,9 +360,5 @@ return PyLong_FromLong(res); """),], prologue= ''' int recurse(void); ''' ) - try: - res = module.call_recursive() - except RuntimeError as e: - assert 'while calling recurse' in str(e) - else: - assert False, "expected RuntimeError" + excinfo = raises(RecursionError, module.call_recursive) + assert 'while calling recurse' in str(excinfo.value) From pypy.commits at gmail.com Fri Jul 7 16:57:05 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:05 -0700 (PDT) Subject: [pypy-commit] pypy conditional_call_value_2: close abandoned branch Message-ID: <595ff5a1.16a5df0a.fdff0.9f0a@mx.google.com> Author: Armin Rigo Branch: conditional_call_value_2 Changeset: r91787:fbbf1f30d0b1 Date: 2017-07-07 22:45 +0200 http://bitbucket.org/pypy/pypy/changeset/fbbf1f30d0b1/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:07 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:07 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: close abandoned branch Message-ID: <595ff5a3.0594df0a.4b757.1b50@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r91788:afd80811d478 Date: 2017-07-07 22:47 +0200 http://bitbucket.org/pypy/pypy/changeset/afd80811d478/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:09 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:09 -0700 (PDT) Subject: [pypy-commit] pypy vtune: close abandoned branch Message-ID: 
<595ff5a5.8292df0a.13fc7.1e07@mx.google.com> Author: Armin Rigo Branch: vtune Changeset: r91789:6deb21770d01 Date: 2017-07-07 22:47 +0200 http://bitbucket.org/pypy/pypy/changeset/6deb21770d01/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:11 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:11 -0700 (PDT) Subject: [pypy-commit] pypy release-4.0.x: close old release branch Message-ID: <595ff5a7.d49a1c0a.d0817.3506@mx.google.com> Author: Armin Rigo Branch: release-4.0.x Changeset: r91790:27ec4052ee55 Date: 2017-07-07 22:48 +0200 http://bitbucket.org/pypy/pypy/changeset/27ec4052ee55/ Log: close old release branch From pypy.commits at gmail.com Fri Jul 7 16:57:15 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:15 -0700 (PDT) Subject: [pypy-commit] pypy py3.3-bootstrap: close abandoned branch Message-ID: <595ff5ab.ba85df0a.440e1.8207@mx.google.com> Author: Armin Rigo Branch: py3.3-bootstrap Changeset: r91792:fc0f322cac4c Date: 2017-07-07 22:48 +0200 http://bitbucket.org/pypy/pypy/changeset/fc0f322cac4c/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:17 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:17 -0700 (PDT) Subject: [pypy-commit] pypy release-2.6.x: close old release branch Message-ID: <595ff5ad.d5aa1c0a.7c5f6.3dab@mx.google.com> Author: Armin Rigo Branch: release-2.6.x Changeset: r91793:47b94751f3dc Date: 2017-07-07 22:49 +0200 http://bitbucket.org/pypy/pypy/changeset/47b94751f3dc/ Log: close old release branch From pypy.commits at gmail.com Fri Jul 7 16:57:18 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:18 -0700 (PDT) Subject: [pypy-commit] pypy conditional_call_value: close abandoned branch Message-ID: <595ff5ae.0594df0a.4b757.1b58@mx.google.com> Author: Armin Rigo Branch: conditional_call_value Changeset: r91794:46341afda0b7 Date: 2017-07-07 22:49 +0200 
http://bitbucket.org/pypy/pypy/changeset/46341afda0b7/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:22 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:22 -0700 (PDT) Subject: [pypy-commit] pypy release-2.4.x: close old release branch Message-ID: <595ff5b2.15ae1c0a.6aa68.2d65@mx.google.com> Author: Armin Rigo Branch: release-2.4.x Changeset: r91796:29e6b0be0768 Date: 2017-07-07 22:50 +0200 http://bitbucket.org/pypy/pypy/changeset/29e6b0be0768/ Log: close old release branch From pypy.commits at gmail.com Fri Jul 7 16:57:24 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:24 -0700 (PDT) Subject: [pypy-commit] pypy shadowstack-again: close abandoned branch Message-ID: <595ff5b4.118e1c0a.c0c2d.4c54@mx.google.com> Author: Armin Rigo Branch: shadowstack-again Changeset: r91797:1d46f3a5d4c1 Date: 2017-07-07 22:50 +0200 http://bitbucket.org/pypy/pypy/changeset/1d46f3a5d4c1/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:20 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:20 -0700 (PDT) Subject: [pypy-commit] pypy release-2.5.x: close old release branch Message-ID: <595ff5b0.4eec1c0a.64493.2d3f@mx.google.com> Author: Armin Rigo Branch: release-2.5.x Changeset: r91795:fa1474eb89d9 Date: 2017-07-07 22:50 +0200 http://bitbucket.org/pypy/pypy/changeset/fa1474eb89d9/ Log: close old release branch From pypy.commits at gmail.com Fri Jul 7 16:57:26 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:26 -0700 (PDT) Subject: [pypy-commit] pypy gc-del-2: close abandoned branch Message-ID: <595ff5b6.6c8fdf0a.b44ac.a07a@mx.google.com> Author: Armin Rigo Branch: gc-del-2 Changeset: r91798:510ed6010d39 Date: 2017-07-07 22:51 +0200 http://bitbucket.org/pypy/pypy/changeset/510ed6010d39/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:28 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:28 
-0700 (PDT) Subject: [pypy-commit] pypy rawmem-checking: close abandoned branch Message-ID: <595ff5b8.9fb6df0a.11e4a.ed48@mx.google.com> Author: Armin Rigo Branch: rawmem-checking Changeset: r91799:9e0af45e973c Date: 2017-07-07 22:51 +0200 http://bitbucket.org/pypy/pypy/changeset/9e0af45e973c/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:30 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:30 -0700 (PDT) Subject: [pypy-commit] pypy gc-del: close abandoned branch Message-ID: <595ff5ba.415f1c0a.2e49.2e5b@mx.google.com> Author: Armin Rigo Branch: gc-del Changeset: r91800:4887e238b946 Date: 2017-07-07 22:51 +0200 http://bitbucket.org/pypy/pypy/changeset/4887e238b946/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:32 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:32 -0700 (PDT) Subject: [pypy-commit] pypy flow-no-local-exception: close abandoned branch Message-ID: <595ff5bc.925b1c0a.517c4.3b2c@mx.google.com> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r91801:4f24b89db699 Date: 2017-07-07 22:52 +0200 http://bitbucket.org/pypy/pypy/changeset/4f24b89db699/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:33 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:33 -0700 (PDT) Subject: [pypy-commit] pypy r15-for-shadowstack: close abandoned branch Message-ID: <595ff5bd.c9b21c0a.c3fc8.4c4d@mx.google.com> Author: Armin Rigo Branch: r15-for-shadowstack Changeset: r91802:6b2bcdd57a77 Date: 2017-07-07 22:52 +0200 http://bitbucket.org/pypy/pypy/changeset/6b2bcdd57a77/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:35 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:35 -0700 (PDT) Subject: [pypy-commit] pypy pypy3-release-2.1.x: close old release branch Message-ID: <595ff5bf.d61b1c0a.46c2b.1d05@mx.google.com> Author: Armin Rigo Branch: pypy3-release-2.1.x 
Changeset: r91803:864c6e8f7f48 Date: 2017-07-07 22:53 +0200 http://bitbucket.org/pypy/pypy/changeset/864c6e8f7f48/ Log: close old release branch From pypy.commits at gmail.com Fri Jul 7 16:57:37 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:37 -0700 (PDT) Subject: [pypy-commit] pypy remove-frame-force-2: close abandoned branch Message-ID: <595ff5c1.02321c0a.2e09b.20af@mx.google.com> Author: Armin Rigo Branch: remove-frame-force-2 Changeset: r91804:e1a01cbc1fbd Date: 2017-07-07 22:53 +0200 http://bitbucket.org/pypy/pypy/changeset/e1a01cbc1fbd/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:39 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:39 -0700 (PDT) Subject: [pypy-commit] pypy no-failargs: close abandoned branch Message-ID: <595ff5c3.2b97df0a.8f6f5.478d@mx.google.com> Author: Armin Rigo Branch: no-failargs Changeset: r91805:1a7218db413e Date: 2017-07-07 22:54 +0200 http://bitbucket.org/pypy/pypy/changeset/1a7218db413e/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:41 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:41 -0700 (PDT) Subject: [pypy-commit] pypy stackroot-speedup: close abandoned branch Message-ID: <595ff5c5.9fb6df0a.11e4a.ed56@mx.google.com> Author: Armin Rigo Branch: stackroot-speedup Changeset: r91806:80e1cdf627b0 Date: 2017-07-07 22:54 +0200 http://bitbucket.org/pypy/pypy/changeset/80e1cdf627b0/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:43 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:43 -0700 (PDT) Subject: [pypy-commit] pypy miniscan: close abandoned branch Message-ID: <595ff5c7.481d1c0a.4e3d.22a2@mx.google.com> Author: Armin Rigo Branch: miniscan Changeset: r91807:bdf87670821f Date: 2017-07-07 22:54 +0200 http://bitbucket.org/pypy/pypy/changeset/bdf87670821f/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:45 2017 From: 
pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:45 -0700 (PDT) Subject: [pypy-commit] pypy concurrent-marksweep: close abandoned branch Message-ID: <595ff5c9.415f1c0a.2e49.2e6d@mx.google.com> Author: Armin Rigo Branch: concurrent-marksweep Changeset: r91808:12337d65cd09 Date: 2017-07-07 22:54 +0200 http://bitbucket.org/pypy/pypy/changeset/12337d65cd09/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:46 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:46 -0700 (PDT) Subject: [pypy-commit] pypy minimark-no-mass-free: close abandoned branch Message-ID: <595ff5ca.1bbf1c0a.798d8.2937@mx.google.com> Author: Armin Rigo Branch: minimark-no-mass-free Changeset: r91809:920884707036 Date: 2017-07-07 22:54 +0200 http://bitbucket.org/pypy/pypy/changeset/920884707036/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:57:48 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:48 -0700 (PDT) Subject: [pypy-commit] pypy refactor-not-in-translator: close abandoned branch Message-ID: <595ff5cc.98b1df0a.5c02f.5412@mx.google.com> Author: Armin Rigo Branch: refactor-not-in-translator Changeset: r91810:1a280b6b65fd Date: 2017-07-07 22:55 +0200 http://bitbucket.org/pypy/pypy/changeset/1a280b6b65fd/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 16:58:11 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:11 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head fbbf1f30d0b1 on branch conditional_call_value_2 Message-ID: <595ff5e3.cead1c0a.90c97.453e@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91811:6efdcd0f0e29 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/6efdcd0f0e29/ Log: Merge closed head fbbf1f30d0b1 on branch conditional_call_value_2 From pypy.commits at gmail.com Fri Jul 7 16:58:12 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:12 
-0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head afd80811d478 on branch remove-raisingops Message-ID: <595ff5e4.6288df0a.bc88c.7707@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91812:597410005aeb Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/597410005aeb/ Log: Merge closed head afd80811d478 on branch remove-raisingops From pypy.commits at gmail.com Fri Jul 7 16:58:14 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:14 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 6deb21770d01 on branch vtune Message-ID: <595ff5e6.2b97df0a.8f6f5.47bf@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91813:e5809a1c0c50 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/e5809a1c0c50/ Log: Merge closed head 6deb21770d01 on branch vtune From pypy.commits at gmail.com Fri Jul 7 16:58:16 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:16 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 27ec4052ee55 on branch release-4.0.x Message-ID: <595ff5e8.456b1c0a.c795.385e@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91814:377fb697d4e8 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/377fb697d4e8/ Log: Merge closed head 27ec4052ee55 on branch release-4.0.x From pypy.commits at gmail.com Fri Jul 7 16:58:18 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:18 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 76741d4e2330 on branch cpyext-gc-support Message-ID: <595ff5ea.81581c0a.4fbb7.2b92@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91815:4f981cc553f7 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/4f981cc553f7/ Log: Merge closed head 76741d4e2330 on branch cpyext-gc-support From pypy.commits at gmail.com Fri Jul 7 16:58:20 2017 From: 
pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:20 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head fc0f322cac4c on branch py3.3-bootstrap Message-ID: <595ff5ec.6288df0a.bc88c.771a@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91816:0c0fa756451c Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/0c0fa756451c/ Log: Merge closed head fc0f322cac4c on branch py3.3-bootstrap From pypy.commits at gmail.com Fri Jul 7 16:58:21 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:21 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 47b94751f3dc on branch release-2.6.x Message-ID: <595ff5ed.0298df0a.7257e.584d@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91817:38d2f2ce203f Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/38d2f2ce203f/ Log: Merge closed head 47b94751f3dc on branch release-2.6.x From pypy.commits at gmail.com Fri Jul 7 16:58:23 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:23 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 46341afda0b7 on branch conditional_call_value Message-ID: <595ff5ef.e386df0a.9b6dc.e209@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91818:879b75a25ba5 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/879b75a25ba5/ Log: Merge closed head 46341afda0b7 on branch conditional_call_value From pypy.commits at gmail.com Fri Jul 7 16:58:25 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:25 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head fa1474eb89d9 on branch release-2.5.x Message-ID: <595ff5f1.898c1c0a.f20f3.2fff@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91819:b021b77b3659 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/b021b77b3659/ Log: Merge closed head fa1474eb89d9 on 
branch release-2.5.x From pypy.commits at gmail.com Fri Jul 7 16:58:26 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:26 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 29e6b0be0768 on branch release-2.4.x Message-ID: <595ff5f2.6582df0a.2d90b.61e9@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91820:90a11a842542 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/90a11a842542/ Log: Merge closed head 29e6b0be0768 on branch release-2.4.x From pypy.commits at gmail.com Fri Jul 7 16:58:28 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:28 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 1d46f3a5d4c1 on branch shadowstack-again Message-ID: <595ff5f4.e386df0a.9b6dc.e20c@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91821:01c98de97f4b Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/01c98de97f4b/ Log: Merge closed head 1d46f3a5d4c1 on branch shadowstack-again From pypy.commits at gmail.com Fri Jul 7 16:58:30 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:30 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 510ed6010d39 on branch gc-del-2 Message-ID: <595ff5f6.30a0df0a.54dbb.32f5@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91822:361fbce008c4 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/361fbce008c4/ Log: Merge closed head 510ed6010d39 on branch gc-del-2 From pypy.commits at gmail.com Fri Jul 7 16:58:32 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:32 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 9e0af45e973c on branch rawmem-checking Message-ID: <595ff5f8.06b71c0a.5a7e9.202f@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91823:e9ee3b259fb2 Date: 2017-07-07 22:57 +0200 
http://bitbucket.org/pypy/pypy/changeset/e9ee3b259fb2/ Log: Merge closed head 9e0af45e973c on branch rawmem-checking From pypy.commits at gmail.com Fri Jul 7 16:58:34 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:34 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 4887e238b946 on branch gc-del Message-ID: <595ff5fa.0a9e1c0a.65f48.290b@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91824:583b6b33cbcf Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/583b6b33cbcf/ Log: Merge closed head 4887e238b946 on branch gc-del From pypy.commits at gmail.com Fri Jul 7 16:58:35 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:35 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 4f24b89db699 on branch flow-no-local-exception Message-ID: <595ff5fb.4c3e1c0a.7496a.5445@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91825:eb92fbe6a9bd Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/eb92fbe6a9bd/ Log: Merge closed head 4f24b89db699 on branch flow-no-local-exception From pypy.commits at gmail.com Fri Jul 7 16:58:37 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:37 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 6b2bcdd57a77 on branch r15-for-shadowstack Message-ID: <595ff5fd.e2a0df0a.80d0c.0e03@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91826:afca6877d09d Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/afca6877d09d/ Log: Merge closed head 6b2bcdd57a77 on branch r15-for-shadowstack From pypy.commits at gmail.com Fri Jul 7 16:58:39 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:39 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 864c6e8f7f48 on branch pypy3-release-2.1.x Message-ID: <595ff5ff.1bbf1c0a.798d8.2962@mx.google.com> Author: Armin Rigo 
Branch: closed-branches Changeset: r91827:f8f8057de60f Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/f8f8057de60f/ Log: Merge closed head 864c6e8f7f48 on branch pypy3-release-2.1.x From pypy.commits at gmail.com Fri Jul 7 16:58:41 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:41 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head e1a01cbc1fbd on branch remove-frame-force-2 Message-ID: <595ff601.2196df0a.ae5d5.2fc0@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91828:38136a171c59 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/38136a171c59/ Log: Merge closed head e1a01cbc1fbd on branch remove-frame-force-2 From pypy.commits at gmail.com Fri Jul 7 16:58:43 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:43 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 1a7218db413e on branch no-failargs Message-ID: <595ff603.ba85df0a.440e1.8274@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91829:8aefcade6de4 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/8aefcade6de4/ Log: Merge closed head 1a7218db413e on branch no-failargs From pypy.commits at gmail.com Fri Jul 7 16:58:44 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:44 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 80e1cdf627b0 on branch stackroot-speedup Message-ID: <595ff604.6e9adf0a.b054.677d@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91830:8b38e97a7928 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/8b38e97a7928/ Log: Merge closed head 80e1cdf627b0 on branch stackroot-speedup From pypy.commits at gmail.com Fri Jul 7 16:58:46 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:46 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head bdf87670821f on branch miniscan 
Message-ID: <595ff606.0387df0a.38d5e.18e6@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91831:133388d7c684 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/133388d7c684/ Log: Merge closed head bdf87670821f on branch miniscan From pypy.commits at gmail.com Fri Jul 7 16:58:48 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:48 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 12337d65cd09 on branch concurrent-marksweep Message-ID: <595ff608.8f871c0a.9e115.45c8@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91832:a1bb7e029469 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/a1bb7e029469/ Log: Merge closed head 12337d65cd09 on branch concurrent-marksweep From pypy.commits at gmail.com Fri Jul 7 16:58:50 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:50 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 920884707036 on branch minimark-no-mass-free Message-ID: <595ff60a.02af1c0a.99826.2c57@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91833:e0175e3c3810 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/e0175e3c3810/ Log: Merge closed head 920884707036 on branch minimark-no-mass-free From pypy.commits at gmail.com Fri Jul 7 16:58:52 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:52 -0700 (PDT) Subject: [pypy-commit] pypy closed-branches: Merge closed head 1a280b6b65fd on branch refactor-not-in-translator Message-ID: <595ff60c.95061c0a.c6d85.28cb@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91834:92a3dc9abbfa Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/92a3dc9abbfa/ Log: Merge closed head 1a280b6b65fd on branch refactor-not-in-translator From pypy.commits at gmail.com Fri Jul 7 16:58:53 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:58:53 
-0700 (PDT) Subject: [pypy-commit] pypy closed-branches: re-close this branch Message-ID: <595ff60d.4b341c0a.846e4.43fa@mx.google.com> Author: Armin Rigo Branch: closed-branches Changeset: r91835:042097216dd9 Date: 2017-07-07 22:57 +0200 http://bitbucket.org/pypy/pypy/changeset/042097216dd9/ Log: re-close this branch From pypy.commits at gmail.com Fri Jul 7 16:57:13 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 07 Jul 2017 13:57:13 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-gc-support: close abandoned branch Message-ID: <595ff5a9.0594df0a.4b757.1b55@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r91791:76741d4e2330 Date: 2017-07-07 22:48 +0200 http://bitbucket.org/pypy/pypy/changeset/76741d4e2330/ Log: close abandoned branch From pypy.commits at gmail.com Fri Jul 7 20:17:47 2017 From: pypy.commits at gmail.com (rlamy) Date: Fri, 07 Jul 2017 17:17:47 -0700 (PDT) Subject: [pypy-commit] pypy default: Fix calling PyErr_Fetch() + PyErr_NormalizeException() with no exception set Message-ID: <596024ab.415f1c0a.2e49.4307@mx.google.com> Author: Ronan Lamy Branch: Changeset: r91836:5adabc9b9848 Date: 2017-07-08 01:17 +0100 http://bitbucket.org/pypy/pypy/changeset/5adabc9b9848/ Log: Fix calling PyErr_Fetch() + PyErr_NormalizeException() with no exception set diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -95,8 +95,18 @@ not an instance of the same class. This function can be used to instantiate the class in that case. If the values are already normalized, nothing happens. 
The delayed normalization is implemented to improve performance.""" - operr = OperationError(from_ref(space, exc_p[0]), - from_ref(space, val_p[0])) + if exc_p[0]: + w_etype = from_ref(space, exc_p[0]) + else: + # There is no exception, so nothing to do + return + if val_p[0]: + w_evalue = from_ref(space, val_p[0]) + else: + # On CPython, PyErr_SetNone actually sets val to NULL. + # Sensible code should probably never trigger this path on PyPy, but... + w_evalue = space.w_None + operr = OperationError(w_etype, w_evalue) operr.normalize_exception(space) Py_DecRef(space, exc_p[0]) Py_DecRef(space, val_p[0]) @@ -388,9 +398,9 @@ freshly raised. This function steals the references of the arguments. To clear the exception state, pass *NULL* for all three arguments. For general rules about the three arguments, see :c:func:`PyErr_Restore`. - + .. note:: - + This function is not normally used by code that wants to handle exceptions. Rather, it can be used when code needs to save and restore the exception state temporarily. 
Use diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -177,6 +177,23 @@ ]) assert module.check_error() + def test_normalize_no_exception(self): + module = self.import_extension('foo', [ + ("check_error", "METH_NOARGS", + ''' + PyObject *type, *val, *tb; + PyErr_Fetch(&type, &val, &tb); + if (type != NULL) + Py_RETURN_FALSE; + if (val != NULL) + Py_RETURN_FALSE; + PyErr_NormalizeException(&type, &val, &tb); + Py_RETURN_TRUE; + ''' + ), + ]) + assert module.check_error() + def test_SetFromErrno(self): import sys if sys.platform != 'win32': From pypy.commits at gmail.com Fri Jul 7 20:19:24 2017 From: pypy.commits at gmail.com (rlamy) Date: Fri, 07 Jul 2017 17:19:24 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <5960250c.c9b21c0a.c3fc8.7f1d@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91837:920958a93997 Date: 2017-07-08 01:18 +0100 http://bitbucket.org/pypy/pypy/changeset/920958a93997/ Log: hg merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -21,3 +21,7 @@ .. branch: issue-2592 CPyext PyListObject.pop must return the value + +.. branch: cpyext-hash_notimpl + +If ``tp_hash`` is ``PyObject_HashNotImplemented``, set ``obj.__dict__['__hash__']`` to None diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -125,8 +125,18 @@ not an instance of the same class. This function can be used to instantiate the class in that case. If the values are already normalized, nothing happens. 
The delayed normalization is implemented to improve performance.""" - operr = OperationError(from_ref(space, exc_p[0]), - from_ref(space, val_p[0])) + if exc_p[0]: + w_etype = from_ref(space, exc_p[0]) + else: + # There is no exception, so nothing to do + return + if val_p[0]: + w_evalue = from_ref(space, val_p[0]) + else: + # On CPython, PyErr_SetNone actually sets val to NULL. + # Sensible code should probably never trigger this path on PyPy, but... + w_evalue = space.w_None + operr = OperationError(w_etype, w_evalue) operr.normalize_exception(space) Py_DecRef(space, exc_p[0]) Py_DecRef(space, val_p[0]) diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -174,6 +174,23 @@ ]) assert module.check_error() + def test_normalize_no_exception(self): + module = self.import_extension('foo', [ + ("check_error", "METH_NOARGS", + ''' + PyObject *type, *val, *tb; + PyErr_Fetch(&type, &val, &tb); + if (type != NULL) + Py_RETURN_FALSE; + if (val != NULL) + Py_RETURN_FALSE; + PyErr_NormalizeException(&type, &val, &tb); + Py_RETURN_TRUE; + ''' + ), + ]) + assert module.check_error() + def test_SetFromErrno(self): import sys if sys.platform != 'win32': diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1301,3 +1301,33 @@ Bsize = module.get_basicsize(B) assert Asize == Bsize assert Asize > basesize + + +class AppTestHashable(AppTestCpythonExtensionBase): + def test_unhashable(self): + if not self.runappdirect: + skip('pointer to function equality available' + ' only after translation') + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + ''' + PyObject *obj; + obj = PyObject_New(PyObject, &Foo_Type); + return obj; + ''' + )], prologue=''' + static PyTypeObject Foo_Type = { 
+ PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo", + }; + ''', more_init = ''' + Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT; + Foo_Type.tp_hash = PyObject_HashNotImplemented; + if (PyType_Ready(&Foo_Type) < 0) INITERROR; + ''') + obj = module.new_obj() + raises(TypeError, hash, obj) + assert type(obj).__dict__['__hash__'] is None + # this is equivalent to + from collections import Hashable + assert not isinstance(obj, Hashable) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -337,13 +337,19 @@ setattr(struct, slot_names[1], slot_func_helper) def add_operators(space, dict_w, pto): - # XXX support PyObject_HashNotImplemented + from pypy.module.cpyext.object import PyObject_HashNotImplemented + hash_not_impl = PyObject_HashNotImplemented.api_func.get_llhelper(space) for method_name, slot_names, wrapper_func, wrapper_func_kwds, doc in slotdefs_for_wrappers: if method_name in dict_w: continue offset = [rffi.offsetof(lltype.typeOf(pto).TO, slot_names[0])] if len(slot_names) == 1: func = getattr(pto, slot_names[0]) + if slot_names[0] == 'c_tp_hash': + if hash_not_impl == func: + # special case for tp_hash == PyObject_HashNotImplemented + dict_w[method_name] = space.w_None + continue else: assert len(slot_names) == 2 struct = getattr(pto, slot_names[0]) From pypy.commits at gmail.com Sat Jul 8 10:58:35 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 08 Jul 2017 07:58:35 -0700 (PDT) Subject: [pypy-commit] pypy default: issue #2601 Message-ID: <5960f31b.090b1c0a.248a9.a6a5@mx.google.com> Author: Armin Rigo Branch: Changeset: r91838:016c02447548 Date: 2017-07-08 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/016c02447548/ Log: issue #2601 Fix for 'reversed(mapping object)' diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -346,7 
+346,7 @@ class W_ReversedIterator(W_Root): def __init__(self, space, w_sequence): self.remaining = space.len_w(w_sequence) - 1 - if space.lookup(w_sequence, "__getitem__") is None: + if not space.issequence_w(w_sequence): raise oefmt(space.w_TypeError, "reversed() argument must be a sequence") self.w_sequence = w_sequence diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -227,6 +227,25 @@ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] raises(TypeError, reversed, reversed("hello")) + def test_reversed_user_type(self): + class X(object): + def __getitem__(self, index): + return str(index) + def __len__(self): + return 5 + assert list(reversed(X())) == ["4", "3", "2", "1", "0"] + + def test_reversed_not_for_mapping(self): + raises(TypeError, reversed, {}) + raises(TypeError, reversed, {2: 3}) + assert not hasattr(dict, '__reversed__') + + def test_reversed_type_with_no_len(self): + class X(object): + def __getitem__(self, key): + raise ValueError + raises(TypeError, reversed, X()) + class AppTestApply: def test_apply(self): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -236,10 +236,6 @@ "an internal 'del' on the dictionary failed to find " "the key") - def descr_reversed(self, space): - raise oefmt(space.w_TypeError, - "argument to reversed() must be a sequence") - def descr_copy(self, space): """D.copy() -> a shallow copy of D""" w_new = W_DictMultiObject.allocate_and_init_instance(space) @@ -517,7 +513,6 @@ __setitem__ = interp2app(W_DictMultiObject.descr_setitem), __delitem__ = interp2app(W_DictMultiObject.descr_delitem), - __reversed__ = interp2app(W_DictMultiObject.descr_reversed), copy = interp2app(W_DictMultiObject.descr_copy), items = 
interp2app(W_DictMultiObject.descr_items), keys = interp2app(W_DictMultiObject.descr_keys), From pypy.commits at gmail.com Sat Jul 8 11:05:30 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 08 Jul 2017 08:05:30 -0700 (PDT) Subject: [pypy-commit] pypy default: Backport passing test from py3.5 Message-ID: <5960f4ba.4e921c0a.20ab6.7265@mx.google.com> Author: Armin Rigo Branch: Changeset: r91839:36058071b9d0 Date: 2017-07-08 17:01 +0200 http://bitbucket.org/pypy/pypy/changeset/36058071b9d0/ Log: Backport passing test from py3.5 diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -246,6 +246,25 @@ raise ValueError raises(TypeError, reversed, X()) + def test_reversed_length_hint(self): + lst = [1, 2, 3] + r = reversed(lst) + assert r.__length_hint__() == 3 + assert next(r) == 3 + assert r.__length_hint__() == 2 + lst.pop() + assert r.__length_hint__() == 2 + lst.pop() + assert r.__length_hint__() == 0 + raises(StopIteration, next, r) + # + r = reversed(lst) + assert r.__length_hint__() == 1 + assert next(r) == 1 + assert r.__length_hint__() == 0 + raises(StopIteration, next, r) + assert r.__length_hint__() == 0 + class AppTestApply: def test_apply(self): From pypy.commits at gmail.com Sat Jul 8 11:05:31 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 08 Jul 2017 08:05:31 -0700 (PDT) Subject: [pypy-commit] pypy default: Match CPython's error message Message-ID: <5960f4bb.c7871c0a.851d4.0eb3@mx.google.com> Author: Armin Rigo Branch: Changeset: r91840:b86461902673 Date: 2017-07-08 17:03 +0200 http://bitbucket.org/pypy/pypy/changeset/b86461902673/ Log: Match CPython's error message diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -348,7 +348,7 @@ self.remaining 
= space.len_w(w_sequence) - 1 if not space.issequence_w(w_sequence): raise oefmt(space.w_TypeError, - "reversed() argument must be a sequence") + "argument to reversed() must be a sequence") self.w_sequence = w_sequence def descr___iter__(self, space): From pypy.commits at gmail.com Sat Jul 8 11:05:33 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 08 Jul 2017 08:05:33 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <5960f4bd.0ca6df0a.d9014.f462@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91841:94b78e58b886 Date: 2017-07-08 17:04 +0200 http://bitbucket.org/pypy/pypy/changeset/94b78e58b886/ Log: hg merge default diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -317,9 +317,9 @@ def __init__(self, space, w_sequence): self.remaining = space.len_w(w_sequence) - 1 - if space.lookup(w_sequence, "__getitem__") is None: + if not space.issequence_w(w_sequence): raise oefmt(space.w_TypeError, - "reversed() argument must be a sequence") + "argument to reversed() must be a sequence") self.w_sequence = w_sequence @staticmethod @@ -328,9 +328,6 @@ if w_reversed_descr is not None: w_reversed = space.get(w_reversed_descr, w_sequence) return space.call_function(w_reversed) - if not space.issequence_w(w_sequence): - raise oefmt(space.w_TypeError, - "argument to reversed() must be a sequence") self = space.allocate_instance(W_ReversedIterator, w_subtype) self.__init__(space, w_sequence) return self diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -527,8 +527,24 @@ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] raises(TypeError, reversed, reversed("hello")) - def test_reversed_nonsequence(self): + def 
test_reversed_user_type(self): + class X(object): + def __getitem__(self, index): + return str(index) + def __len__(self): + return 5 + assert list(reversed(X())) == ["4", "3", "2", "1", "0"] + + def test_reversed_not_for_mapping(self): raises(TypeError, reversed, {}) + raises(TypeError, reversed, {2: 3}) + assert not hasattr(dict, '__reversed__') + + def test_reversed_type_with_no_len(self): + class X(object): + def __getitem__(self, key): + raise ValueError + raises(TypeError, reversed, X()) def test_reversed_length_hint(self): lst = [1, 2, 3] From pypy.commits at gmail.com Sat Jul 8 11:12:01 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 08 Jul 2017 08:12:01 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <5960f641.5886df0a.d8fbc.52de@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91843:7888ede0c136 Date: 2017-07-08 17:11 +0200 http://bitbucket.org/pypy/pypy/changeset/7888ede0c136/ Log: hg merge default diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -539,6 +539,7 @@ raises(TypeError, reversed, {}) raises(TypeError, reversed, {2: 3}) assert not hasattr(dict, '__reversed__') + raises(TypeError, reversed, int.__dict__) def test_reversed_type_with_no_len(self): class X(object): diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -77,5 +77,6 @@ # {}.update(proxy) + class AppTestUserObjectMethodCache(AppTestUserObject): spaceconfig = {"objspace.std.withmethodcachecounter": True} From pypy.commits at gmail.com Sat Jul 8 11:11:59 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 08 Jul 2017 08:11:59 -0700 (PDT) Subject: [pypy-commit] pypy default: issue #2601 Message-ID: 
<5960f63f.05b61c0a.3aa8b.2a2b@mx.google.com> Author: Armin Rigo Branch: Changeset: r91842:fe1c32761823 Date: 2017-07-08 17:10 +0200 http://bitbucket.org/pypy/pypy/changeset/fe1c32761823/ Log: issue #2601 Fix for 'reversed(dictproxy)', mostly by backporting 78dee66 diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -239,6 +239,7 @@ raises(TypeError, reversed, {}) raises(TypeError, reversed, {2: 3}) assert not hasattr(dict, '__reversed__') + raises(TypeError, reversed, int.__dict__) def test_reversed_type_with_no_len(self): class X(object): diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -99,3 +99,7 @@ copy=interp2app(W_DictProxyObject.copy_w), **cmp_methods ) + +def _set_flag_map_or_seq(space): + w_type = space.gettypeobject(W_DictProxyObject.typedef) + w_type.flag_map_or_seq = 'M' diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -96,6 +96,10 @@ self._interplevel_classes[w_type] = cls self.w_text = self.w_bytes # 'space.w_text' is w_unicode on Py3 self.w_dict.flag_map_or_seq = 'M' + from pypy.objspace.std import dictproxyobject + dictproxyobject._set_flag_map_or_seq(self) + self.w_list.flag_map_or_seq = 'S' + self.w_tuple.flag_map_or_seq = 'S' self.builtin_types["NotImplemented"] = self.w_NotImplemented self.builtin_types["Ellipsis"] = self.w_Ellipsis self.w_basestring = self.builtin_types['basestring'] = \ diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -78,6 +78,8 @@ raises(TypeError, "proxy['a'] = 4") raises(TypeError, "del 
proxy['a']") raises(AttributeError, "proxy.clear()") + raises(TypeError, reversed, proxy) + class AppTestUserObjectMethodCache(AppTestUserObject): spaceconfig = {"objspace.std.withmethodcachecounter": True} From pypy.commits at gmail.com Sat Jul 8 11:16:52 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 08 Jul 2017 08:16:52 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Fix test Message-ID: <5960f764.028b1c0a.61c00.cdf2@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91844:93580eb8c58e Date: 2017-07-08 17:16 +0200 http://bitbucket.org/pypy/pypy/changeset/93580eb8c58e/ Log: Fix test diff --git a/pypy/module/thread/test/test_local.py b/pypy/module/thread/test/test_local.py --- a/pypy/module/thread/test/test_local.py +++ b/pypy/module/thread/test/test_local.py @@ -73,13 +73,13 @@ assert tags == ['???'] def test_local_init2(self): - import thread + import _thread class A(object): def __init__(self, n): assert n == 42 self.n = n - class X(thread._local, A): + class X(_thread._local, A): pass x = X(42) From pypy.commits at gmail.com Sat Jul 8 12:11:19 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 08 Jul 2017 09:11:19 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Accept buffer objects as filenames. It often works like the Message-ID: <59610427.9aa0df0a.6f421.1609@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91845:4d0f184d43a2 Date: 2017-07-08 18:10 +0200 http://bitbucket.org/pypy/pypy/changeset/4d0f184d43a2/ Log: Accept buffer objects as filenames. It often works like the corresponding bytes object. Of course, os.listdir() is an exception to that rule. 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1641,9 +1641,13 @@ return fsdecode(space, w_obj) def fsencode_w(self, w_obj): - from rpython.rlib import rstring if self.isinstance_w(w_obj, self.w_unicode): w_obj = self.fsencode(w_obj) + return self.bytesbuf0_w(w_obj) + + def bytesbuf0_w(self, w_obj): + # Like bytes0_w(), but also accept a read-only buffer. + from rpython.rlib import rstring try: result = self.bytes_w(w_obj) except OperationError as e: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -66,7 +66,7 @@ self.w_obj = w_obj def as_bytes(self): - return self.space.bytes0_w(self.w_obj) + return self.space.bytesbuf0_w(self.w_obj) def as_unicode(self): return self.space.fsdecode_w(self.w_obj) @@ -85,7 +85,7 @@ fname = FileEncoder(space, w_fname) return func(fname, *args) else: - fname = space.bytes0_w(w_fname) + fname = space.bytesbuf0_w(w_fname) return func(fname, *args) return dispatch @@ -746,7 +746,7 @@ fullpath = rposix.getfullpathname(path) w_fullpath = space.newunicode(fullpath) else: - path = space.bytes0_w(w_path) + path = space.bytesbuf0_w(w_path) fullpath = rposix.getfullpathname(path) w_fullpath = space.newbytes(fullpath) except OSError as e: @@ -931,6 +931,8 @@ if space.is_none(w_path): w_path = space.newunicode(u".") if space.isinstance_w(w_path, space.w_bytes): + # XXX CPython doesn't follow this path either if w_path is, + # for example, a memoryview or another buffer type dirname = space.bytes0_w(w_path) try: result = rposix.listdir(dirname) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -334,6 +334,14 @@ expected = b'caf%E9' if sys.platform == 'darwin' else b'caf\xe9' assert 
expected in result + def test_listdir_memoryview_returns_unicode(self): + # XXX unknown why CPython has this behaviour + bytes_dir = self.bytes_dir + os, posix = self.os, self.posix + result1 = posix.listdir(bytes_dir) # -> list of bytes + result2 = posix.listdir(memoryview(bytes_dir)) # -> list of unicodes + assert [os.fsencode(x) for x in result2] == result1 + def test_fdlistdir(self): posix = self.posix dirfd = posix.open('.', posix.O_RDONLY) @@ -1141,6 +1149,12 @@ with open(dest) as f: data = f.read() assert data == "who cares?" + # + posix.unlink(dest) + posix.symlink(memoryview(bytes_dir + b"/somefile"), dest) + with open(dest) as f: + data = f.read() + assert data == "who cares?" # XXX skip test if dir_fd is unsupported def test_symlink_fd(self): @@ -1293,6 +1307,15 @@ s2.close() s1.close() + def test_filename_can_be_a_buffer(self): + import posix, sys + fsencoding = sys.getfilesystemencoding() + pdir = (self.pdir + '/file1').encode(fsencoding) + fd = posix.open(pdir, posix.O_RDONLY) + posix.close(fd) + fd = posix.open(memoryview(pdir), posix.O_RDONLY) + posix.close(fd) + if sys.platform.startswith('linux'): def test_sendfile_no_offset(self): import _socket, posix From pypy.commits at gmail.com Sat Jul 8 13:21:16 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 08 Jul 2017 10:21:16 -0700 (PDT) Subject: [pypy-commit] cffi default: Apply the patch of issue 321 if __NetBSD__ is defined. Message-ID: <5961148c.02321c0a.2e09b.9a71@mx.google.com> Author: Armin Rigo Branch: Changeset: r2992:115ee1bd9bc7 Date: 2017-07-08 19:21 +0200 http://bitbucket.org/cffi/cffi/changeset/115ee1bd9bc7/ Log: Apply the patch of issue 321 if __NetBSD__ is defined. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -60,7 +60,38 @@ # endif #endif -#include "malloc_closure.h" + +/* Define the following macro ONLY if you trust libffi's version of + * ffi_closure_alloc() more than the code in malloc_closure.h. 
+ * IMPORTANT: DO NOT ENABLE THIS ON LINUX, unless you understand exactly + * why I recommend against it and decide that you trust it more than my + * analysis below. + * + * There are two versions of this code: one inside libffi itself, and + * one inside malloc_closure.h here. Both should be fine as long as the + * Linux distribution does _not_ enable extra security features. If it + * does, then the code in malloc_closure.h will cleanly crash because + * there is no reasonable way to obtain a read-write-execute memory + * page. On the other hand, the code in libffi will appear to + * work---but will actually randomly crash after a fork() if the child + * does not immediately call exec(). This second crash is of the kind + * that can be turned into an attack vector by a motivated attacker. + * So, _enabling_ extra security features _opens_ an attack vector. + * That sounds like a horribly bad idea to me, and is the reason for why + * I prefer CFFI crashing cleanly. + * + * Currently, we use libffi's ffi_closure_alloc() only on NetBSD. It is + * known that on the NetBSD kernel, a different strategy is used which + * should not be open to the fork() bug. 
+ */ +#ifdef __NetBSD__ +# define CFFI_TRUST_LIBFFI +#endif + +#ifndef CFFI_TRUST_LIBFFI +# include "malloc_closure.h" +#endif + #if PY_MAJOR_VERSION >= 3 # define STR_OR_BYTES "bytes" @@ -259,6 +290,11 @@ } CDataObject_gcp; typedef struct { + CDataObject head; + ffi_closure *closure; +} CDataObject_closure; + +typedef struct { ffi_cif cif; /* the following information is used when doing the call: - a buffer of size 'exchange_size' is malloced @@ -1781,10 +1817,14 @@ Py_DECREF(x); } else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ - ffi_closure *closure = (ffi_closure *)cd->c_data; + ffi_closure *closure = ((CDataObject_closure *)cd)->closure; PyObject *args = (PyObject *)(closure->user_data); Py_XDECREF(args); +#ifdef CFFI_TRUST_LIBFFI + ffi_closure_free(closure); +#else cffi_closure_free(closure); +#endif } else if (cd->c_type->ct_flags & CT_IS_UNSIZED_CHAR_A) { /* from_buffer */ Py_buffer *view = ((CDataObject_owngc_frombuf *)cd)->bufferview; @@ -1801,7 +1841,7 @@ Py_VISIT(x); } else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ - ffi_closure *closure = (ffi_closure *)cd->c_data; + ffi_closure *closure = ((CDataObject_closure *)cd)->closure; PyObject *args = (PyObject *)(closure->user_data); Py_VISIT(args); } @@ -1822,7 +1862,7 @@ Py_DECREF(x); } else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ - ffi_closure *closure = (ffi_closure *)cd->c_data; + ffi_closure *closure = ((CDataObject_closure *)cd)->closure; PyObject *args = (PyObject *)(closure->user_data); closure->user_data = NULL; Py_XDECREF(args); @@ -2036,7 +2076,8 @@ return _cdata_repr2(cd, "handle to", x); } else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ - PyObject *args = (PyObject *)((ffi_closure *)cd->c_data)->user_data; + ffi_closure *closure = ((CDataObject_closure *)cd)->closure; + PyObject *args = (PyObject *)closure->user_data; if (args == NULL) return cdata_repr(cd); else @@ -5727,11 +5768,12 @@ static PyObject 
*b_callback(PyObject *self, PyObject *args) { CTypeDescrObject *ct; - CDataObject *cd; + CDataObject_closure *cd; PyObject *ob, *error_ob = Py_None, *onerror_ob = Py_None; PyObject *infotuple; cif_description_t *cif_descr; ffi_closure *closure; + void *closure_exec; if (!PyArg_ParseTuple(args, "O!O|OO:callback", &CTypeDescr_Type, &ct, &ob, &error_ob, &onerror_ob)) @@ -5741,15 +5783,24 @@ if (infotuple == NULL) return NULL; +#ifdef CFFI_TRUST_LIBFFI + closure = ffi_closure_alloc(sizeof(ffi_closure), &closure_exec); +#else closure = cffi_closure_alloc(); - - cd = PyObject_GC_New(CDataObject, &CDataOwningGC_Type); + closure_exec = closure; +#endif + if (closure == NULL) { + Py_DECREF(infotuple); + return NULL; + } + cd = PyObject_GC_New(CDataObject_closure, &CDataOwningGC_Type); if (cd == NULL) goto error; Py_INCREF(ct); - cd->c_type = ct; - cd->c_data = (char *)closure; - cd->c_weakreflist = NULL; + cd->head.c_type = ct; + cd->head.c_data = (char *)closure_exec; + cd->head.c_weakreflist = NULL; + cd->closure = closure; PyObject_GC_Track(cd); cif_descr = (cif_description_t *)ct->ct_extra; @@ -5759,8 +5810,13 @@ "return type or with '...'", ct->ct_name); goto error; } +#ifdef CFFI_TRUST_LIBFFI + if (ffi_prep_closure_loc(closure, &cif_descr->cif, + invoke_callback, infotuple, closure_exec) != FFI_OK) { +#else if (ffi_prep_closure(closure, &cif_descr->cif, invoke_callback, infotuple) != FFI_OK) { +#endif PyErr_SetString(PyExc_SystemError, "libffi failed to build this callback"); goto error; @@ -5783,8 +5839,13 @@ error: closure->user_data = NULL; - if (cd == NULL) + if (cd == NULL) { +#ifdef CFFI_TRUST_LIBFFI + ffi_closure_free(closure); +#else cffi_closure_free(closure); +#endif + } else Py_DECREF(cd); Py_XDECREF(infotuple); diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -201,9 +201,5 @@ NetBSD ++++++ -Reports on NetBSD are not good. 
You first need to make sure you have an -up-to-date version of libffi, which fixes some bugs. However, there are -still a number of segfaults and failures running the CFFI tests (see -`issue 321`__). Contributions welcome. - -.. __: https://bitbucket.org/cffi/cffi/issues/321/cffi-191-segmentation-fault-during-self +You need to make sure you have an up-to-date version of libffi, which +fixes some bugs. diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -38,6 +38,10 @@ typically, console applications that crash close immediately, but that is also the situation where stderr should be visible anyway. +* Progress on support for `callbacks in NetBSD`__. + +.. __: https://bitbucket.org/cffi/cffi/issues/321/cffi-191-segmentation-fault-during-self + v1.10.1 ======= From pypy.commits at gmail.com Sun Jul 9 10:19:45 2017 From: pypy.commits at gmail.com (mattip) Date: Sun, 09 Jul 2017 07:19:45 -0700 (PDT) Subject: [pypy-commit] pypy default: replace getdictvalue() with lookup() to properly traverse the mro as CPython does Message-ID: <59623b81.5e361c0a.c2ee0.45e6@mx.google.com> Author: Matti Picus Branch: Changeset: r91846:26b88e87fcc8 Date: 2017-07-09 17:18 +0300 http://bitbucket.org/pypy/pypy/changeset/26b88e87fcc8/ Log: replace getdictvalue() with lookup() to properly traverse the mro as CPython does diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -466,7 +466,7 @@ ('tp_iter', '__iter__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -481,7 +481,7 @@ ('tp_as_mapping.c_mp_length', '__len__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @slot_function([PyObject], lltype.Signed, error=-1) @@ -508,7 +508,7 @@ 
('tp_as_mapping.c_mp_subscript', '__getitem__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -525,7 +525,7 @@ ('tp_as_sequence.c_sq_inplace_repeat', '__imul__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -539,7 +539,7 @@ for tp_name, attr in [('tp_as_number.c_nb_power', '__pow__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -552,10 +552,10 @@ for tp_name, attr in [('tp_as_mapping.c_mp_ass_subscript', '__setitem__'), ]: if name == tp_name: - slot_ass = w_type.getdictvalue(space, attr) + slot_ass = w_type.lookup(attr) if slot_ass is None: return - slot_del = w_type.getdictvalue(space, '__delitem__') + slot_del = w_type.lookup('__delitem__') if slot_del is None: return @@ -573,10 +573,10 @@ for tp_name, attr in [('tp_as_sequence.c_sq_ass_item', '__setitem__'), ]: if name == tp_name: - slot_ass = w_type.getdictvalue(space, attr) + slot_ass = w_type.lookup(attr) if slot_ass is None: return - slot_del = w_type.getdictvalue(space, '__delitem__') + slot_del = w_type.lookup('__delitem__') if slot_del is None: return @@ -593,8 +593,8 @@ if handled: pass elif name == 'tp_setattro': - setattr_fn = w_type.getdictvalue(space, '__setattr__') - delattr_fn = w_type.getdictvalue(space, '__delattr__') + setattr_fn = w_type.lookup('__setattr__') + delattr_fn = w_type.lookup('__delattr__') if setattr_fn is None: return @@ -609,7 +609,7 @@ return 0 slot_func = slot_tp_setattro elif name == 'tp_getattro': - getattr_fn = w_type.getdictvalue(space, '__getattribute__') + getattr_fn = w_type.lookup('__getattribute__') if getattr_fn is None: return @@ -620,7 +620,7 @@ slot_func = slot_tp_getattro elif name == 'tp_call': - call_fn = w_type.getdictvalue(space, '__call__') + call_fn = w_type.lookup('__call__') if call_fn is None: return @@ 
-633,7 +633,7 @@ slot_func = slot_tp_call elif name == 'tp_iternext': - iternext_fn = w_type.getdictvalue(space, 'next') + iternext_fn = w_type.lookup('next') if iternext_fn is None: return @@ -649,7 +649,7 @@ slot_func = slot_tp_iternext elif name == 'tp_init': - init_fn = w_type.getdictvalue(space, '__init__') + init_fn = w_type.lookup('__init__') if init_fn is None: return @@ -662,7 +662,7 @@ return 0 slot_func = slot_tp_init elif name == 'tp_new': - new_fn = w_type.getdictvalue(space, '__new__') + new_fn = w_type.lookup('__new__') if new_fn is None: return @@ -674,7 +674,7 @@ return space.call_args(space.get(new_fn, w_self), args) slot_func = slot_tp_new elif name == 'tp_as_buffer.c_bf_getbuffer': - buff_fn = w_type.getdictvalue(space, '__buffer__') + buff_fn = w_type.lookup('__buffer__') if buff_fn is not None: buff_w = slot_from___buffer__(space, typedef, buff_fn) elif typedef.buffer: @@ -683,7 +683,7 @@ return slot_func = buff_w elif name == 'tp_descr_get': - get_fn = w_type.getdictvalue(space, '__get__') + get_fn = w_type.lookup('__get__') if get_fn is None: return @@ -695,8 +695,8 @@ return space.call_function(get_fn, w_self, w_obj, w_value) slot_func = slot_tp_descr_get elif name == 'tp_descr_set': - set_fn = w_type.getdictvalue(space, '__set__') - delete_fn = w_type.getdictvalue(space, '__delete__') + set_fn = w_type.lookup('__set__') + delete_fn = w_type.lookup('__delete__') if set_fn is None and delete_fn is None: return diff --git a/pypy/module/cpyext/test/test_boolobject.py b/pypy/module/cpyext/test/test_boolobject.py --- a/pypy/module/cpyext/test/test_boolobject.py +++ b/pypy/module/cpyext/test/test_boolobject.py @@ -26,3 +26,20 @@ ]) assert module.get_true() == True assert module.get_false() == False + + def test_toint(self): + module = self.import_extension('foo', [ + ("to_int", "METH_O", + ''' + if (args->ob_type->tp_as_number && args->ob_type->tp_as_number->nb_int) { + return args->ob_type->tp_as_number->nb_int(args); + } + else { + 
PyErr_SetString(PyExc_TypeError,"cannot convert bool to int"); + return NULL; + } + '''), ]) + assert module.to_int(False) == 0 + assert module.to_int(True) == 1 + + From pypy.commits at gmail.com Sun Jul 9 16:29:41 2017 From: pypy.commits at gmail.com (mattip) Date: Sun, 09 Jul 2017 13:29:41 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-add_newdoc: merge default into branch Message-ID: <59629235.0594df0a.4b757.260f@mx.google.com> Author: Matti Picus Branch: cpyext-add_newdoc Changeset: r91847:641767ad7623 Date: 2017-07-09 17:20 +0300 http://bitbucket.org/pypy/pypy/changeset/641767ad7623/ Log: merge default into branch diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -346,9 +346,9 @@ class W_ReversedIterator(W_Root): def __init__(self, space, w_sequence): self.remaining = space.len_w(w_sequence) - 1 - if space.lookup(w_sequence, "__getitem__") is None: + if not space.issequence_w(w_sequence): raise oefmt(space.w_TypeError, - "reversed() argument must be a sequence") + "argument to reversed() must be a sequence") self.w_sequence = w_sequence def descr___iter__(self, space): diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -227,6 +227,45 @@ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] raises(TypeError, reversed, reversed("hello")) + def test_reversed_user_type(self): + class X(object): + def __getitem__(self, index): + return str(index) + def __len__(self): + return 5 + assert list(reversed(X())) == ["4", "3", "2", "1", "0"] + + def test_reversed_not_for_mapping(self): + raises(TypeError, reversed, {}) + raises(TypeError, reversed, {2: 3}) + assert not hasattr(dict, '__reversed__') + raises(TypeError, reversed, int.__dict__) + + def 
test_reversed_type_with_no_len(self): + class X(object): + def __getitem__(self, key): + raise ValueError + raises(TypeError, reversed, X()) + + def test_reversed_length_hint(self): + lst = [1, 2, 3] + r = reversed(lst) + assert r.__length_hint__() == 3 + assert next(r) == 3 + assert r.__length_hint__() == 2 + lst.pop() + assert r.__length_hint__() == 2 + lst.pop() + assert r.__length_hint__() == 0 + raises(StopIteration, next, r) + # + r = reversed(lst) + assert r.__length_hint__() == 1 + assert next(r) == 1 + assert r.__length_hint__() == 0 + raises(StopIteration, next, r) + assert r.__length_hint__() == 0 + class AppTestApply: def test_apply(self): diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -95,8 +95,18 @@ not an instance of the same class. This function can be used to instantiate the class in that case. If the values are already normalized, nothing happens. The delayed normalization is implemented to improve performance.""" - operr = OperationError(from_ref(space, exc_p[0]), - from_ref(space, val_p[0])) + if exc_p[0]: + w_etype = from_ref(space, exc_p[0]) + else: + # There is no exception, so nothing to do + return + if val_p[0]: + w_evalue = from_ref(space, val_p[0]) + else: + # On CPython, PyErr_SetNone actually sets val to NULL. + # Sensible code should probably never trigger this path on PyPy, but... + w_evalue = space.w_None + operr = OperationError(w_etype, w_evalue) operr.normalize_exception(space) Py_DecRef(space, exc_p[0]) Py_DecRef(space, val_p[0]) @@ -388,9 +398,9 @@ freshly raised. This function steals the references of the arguments. To clear the exception state, pass *NULL* for all three arguments. For general rules about the three arguments, see :c:func:`PyErr_Restore`. - + .. note:: - + This function is not normally used by code that wants to handle exceptions. 
Rather, it can be used when code needs to save and restore the exception state temporarily. Use diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -466,7 +466,7 @@ ('tp_iter', '__iter__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -481,7 +481,7 @@ ('tp_as_mapping.c_mp_length', '__len__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @slot_function([PyObject], lltype.Signed, error=-1) @@ -508,7 +508,7 @@ ('tp_as_mapping.c_mp_subscript', '__getitem__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -525,7 +525,7 @@ ('tp_as_sequence.c_sq_inplace_repeat', '__imul__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -539,7 +539,7 @@ for tp_name, attr in [('tp_as_number.c_nb_power', '__pow__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -552,10 +552,10 @@ for tp_name, attr in [('tp_as_mapping.c_mp_ass_subscript', '__setitem__'), ]: if name == tp_name: - slot_ass = w_type.getdictvalue(space, attr) + slot_ass = w_type.lookup(attr) if slot_ass is None: return - slot_del = w_type.getdictvalue(space, '__delitem__') + slot_del = w_type.lookup('__delitem__') if slot_del is None: return @@ -573,10 +573,10 @@ for tp_name, attr in [('tp_as_sequence.c_sq_ass_item', '__setitem__'), ]: if name == tp_name: - slot_ass = w_type.getdictvalue(space, attr) + slot_ass = w_type.lookup(attr) if slot_ass is None: return - slot_del = w_type.getdictvalue(space, '__delitem__') + slot_del = w_type.lookup('__delitem__') if slot_del is None: return @@ -593,8 +593,8 @@ if handled: 
pass elif name == 'tp_setattro': - setattr_fn = w_type.getdictvalue(space, '__setattr__') - delattr_fn = w_type.getdictvalue(space, '__delattr__') + setattr_fn = w_type.lookup('__setattr__') + delattr_fn = w_type.lookup('__delattr__') if setattr_fn is None: return @@ -609,7 +609,7 @@ return 0 slot_func = slot_tp_setattro elif name == 'tp_getattro': - getattr_fn = w_type.getdictvalue(space, '__getattribute__') + getattr_fn = w_type.lookup('__getattribute__') if getattr_fn is None: return @@ -620,7 +620,7 @@ slot_func = slot_tp_getattro elif name == 'tp_call': - call_fn = w_type.getdictvalue(space, '__call__') + call_fn = w_type.lookup('__call__') if call_fn is None: return @@ -633,7 +633,7 @@ slot_func = slot_tp_call elif name == 'tp_iternext': - iternext_fn = w_type.getdictvalue(space, 'next') + iternext_fn = w_type.lookup('next') if iternext_fn is None: return @@ -649,7 +649,7 @@ slot_func = slot_tp_iternext elif name == 'tp_init': - init_fn = w_type.getdictvalue(space, '__init__') + init_fn = w_type.lookup('__init__') if init_fn is None: return @@ -662,7 +662,7 @@ return 0 slot_func = slot_tp_init elif name == 'tp_new': - new_fn = w_type.getdictvalue(space, '__new__') + new_fn = w_type.lookup('__new__') if new_fn is None: return @@ -674,7 +674,7 @@ return space.call_args(space.get(new_fn, w_self), args) slot_func = slot_tp_new elif name == 'tp_as_buffer.c_bf_getbuffer': - buff_fn = w_type.getdictvalue(space, '__buffer__') + buff_fn = w_type.lookup('__buffer__') if buff_fn is not None: buff_w = slot_from___buffer__(space, typedef, buff_fn) elif typedef.buffer: @@ -683,7 +683,7 @@ return slot_func = buff_w elif name == 'tp_descr_get': - get_fn = w_type.getdictvalue(space, '__get__') + get_fn = w_type.lookup('__get__') if get_fn is None: return @@ -695,8 +695,8 @@ return space.call_function(get_fn, w_self, w_obj, w_value) slot_func = slot_tp_descr_get elif name == 'tp_descr_set': - set_fn = w_type.getdictvalue(space, '__set__') - delete_fn = 
w_type.getdictvalue(space, '__delete__') + set_fn = w_type.lookup('__set__') + delete_fn = w_type.lookup('__delete__') if set_fn is None and delete_fn is None: return diff --git a/pypy/module/cpyext/test/test_boolobject.py b/pypy/module/cpyext/test/test_boolobject.py --- a/pypy/module/cpyext/test/test_boolobject.py +++ b/pypy/module/cpyext/test/test_boolobject.py @@ -26,3 +26,20 @@ ]) assert module.get_true() == True assert module.get_false() == False + + def test_toint(self): + module = self.import_extension('foo', [ + ("to_int", "METH_O", + ''' + if (args->ob_type->tp_as_number && args->ob_type->tp_as_number->nb_int) { + return args->ob_type->tp_as_number->nb_int(args); + } + else { + PyErr_SetString(PyExc_TypeError,"cannot convert bool to int"); + return NULL; + } + '''), ]) + assert module.to_int(False) == 0 + assert module.to_int(True) == 1 + + diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -177,6 +177,23 @@ ]) assert module.check_error() + def test_normalize_no_exception(self): + module = self.import_extension('foo', [ + ("check_error", "METH_NOARGS", + ''' + PyObject *type, *val, *tb; + PyErr_Fetch(&type, &val, &tb); + if (type != NULL) + Py_RETURN_FALSE; + if (val != NULL) + Py_RETURN_FALSE; + PyErr_NormalizeException(&type, &val, &tb); + Py_RETURN_TRUE; + ''' + ), + ]) + assert module.check_error() + def test_SetFromErrno(self): import sys if sys.platform != 'win32': diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -236,10 +236,6 @@ "an internal 'del' on the dictionary failed to find " "the key") - def descr_reversed(self, space): - raise oefmt(space.w_TypeError, - "argument to reversed() must be a sequence") - def descr_copy(self, space): """D.copy() -> a shallow copy of D""" w_new = 
W_DictMultiObject.allocate_and_init_instance(space) @@ -517,7 +513,6 @@ __setitem__ = interp2app(W_DictMultiObject.descr_setitem), __delitem__ = interp2app(W_DictMultiObject.descr_delitem), - __reversed__ = interp2app(W_DictMultiObject.descr_reversed), copy = interp2app(W_DictMultiObject.descr_copy), items = interp2app(W_DictMultiObject.descr_items), keys = interp2app(W_DictMultiObject.descr_keys), diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -99,3 +99,7 @@ copy=interp2app(W_DictProxyObject.copy_w), **cmp_methods ) + +def _set_flag_map_or_seq(space): + w_type = space.gettypeobject(W_DictProxyObject.typedef) + w_type.flag_map_or_seq = 'M' diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -96,6 +96,10 @@ self._interplevel_classes[w_type] = cls self.w_text = self.w_bytes # 'space.w_text' is w_unicode on Py3 self.w_dict.flag_map_or_seq = 'M' + from pypy.objspace.std import dictproxyobject + dictproxyobject._set_flag_map_or_seq(self) + self.w_list.flag_map_or_seq = 'S' + self.w_tuple.flag_map_or_seq = 'S' self.builtin_types["NotImplemented"] = self.w_NotImplemented self.builtin_types["Ellipsis"] = self.w_Ellipsis self.w_basestring = self.builtin_types['basestring'] = \ diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -78,6 +78,8 @@ raises(TypeError, "proxy['a'] = 4") raises(TypeError, "del proxy['a']") raises(AttributeError, "proxy.clear()") + raises(TypeError, reversed, proxy) + class AppTestUserObjectMethodCache(AppTestUserObject): spaceconfig = {"objspace.std.withmethodcachecounter": True} From pypy.commits at gmail.com Sun Jul 9 22:26:55 2017 From: pypy.commits at gmail.com (mattip) Date: Sun, 
09 Jul 2017 19:26:55 -0700 (PDT) Subject: [pypy-commit] pypy default: test, fix for PyObject_Print(NULL, ...), which occurred in Numpy f2py --debug-capi Message-ID: <5962e5ef.8eb81c0a.57b8d.436e@mx.google.com> Author: Matti Picus Branch: Changeset: r91848:5a93d0b7ba23 Date: 2017-07-10 05:04 +0300 http://bitbucket.org/pypy/pypy/changeset/5a93d0b7ba23/ Log: test, fix for PyObject_Print(NULL, ...), which occurred in Numpy f2py --debug-capi diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -436,15 +436,19 @@ Py_PRINT_RAW = 1 # No string quotes etc. @cpython_api([PyObject, FILEP, rffi.INT_real], rffi.INT_real, error=-1) -def PyObject_Print(space, w_obj, fp, flags): +def PyObject_Print(space, pyobj, fp, flags): """Print an object o, on file fp. Returns -1 on error. The flags argument is used to enable certain printing options. The only option currently supported is Py_PRINT_RAW; if given, the str() of the object is written instead of the repr().""" - if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: - w_str = space.str(w_obj) + if not pyobj: + w_str = space.newtext("") else: - w_str = space.repr(w_obj) + w_obj = from_ref(space, pyobj) + if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: + w_str = space.str(w_obj) + else: + w_str = space.repr(w_obj) count = space.len_w(w_str) data = space.text_w(w_str) diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -315,13 +315,20 @@ if (fp == NULL) Py_RETURN_NONE; ret = PyObject_Print(obj, fp, Py_PRINT_RAW); + if (ret < 0) { + fclose(fp); + return NULL; + } + ret = PyObject_Print(NULL, fp, Py_PRINT_RAW); + if (ret < 0) { + fclose(fp); + return NULL; + } fclose(fp); - if (ret < 0) - return NULL; Py_RETURN_TRUE; """)]) assert module.dump(self.tmpname, None) - assert open(self.tmpname).read() == 'None' + 
assert open(self.tmpname).read() == 'None' def test_issue1970(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Sun Jul 9 22:26:57 2017 From: pypy.commits at gmail.com (mattip) Date: Sun, 09 Jul 2017 19:26:57 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-add_newdoc: merge default into branch Message-ID: <5962e5f1.02d91c0a.f8db0.099c@mx.google.com> Author: Matti Picus Branch: cpyext-add_newdoc Changeset: r91849:85522f4f43a8 Date: 2017-07-10 05:04 +0300 http://bitbucket.org/pypy/pypy/changeset/85522f4f43a8/ Log: merge default into branch diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -436,15 +436,19 @@ Py_PRINT_RAW = 1 # No string quotes etc. @cpython_api([PyObject, FILEP, rffi.INT_real], rffi.INT_real, error=-1) -def PyObject_Print(space, w_obj, fp, flags): +def PyObject_Print(space, pyobj, fp, flags): """Print an object o, on file fp. Returns -1 on error. The flags argument is used to enable certain printing options. 
The only option currently supported is Py_PRINT_RAW; if given, the str() of the object is written instead of the repr().""" - if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: - w_str = space.str(w_obj) + if not pyobj: + w_str = space.newtext("") else: - w_str = space.repr(w_obj) + w_obj = from_ref(space, pyobj) + if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: + w_str = space.str(w_obj) + else: + w_str = space.repr(w_obj) count = space.len_w(w_str) data = space.text_w(w_str) diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -315,13 +315,20 @@ if (fp == NULL) Py_RETURN_NONE; ret = PyObject_Print(obj, fp, Py_PRINT_RAW); + if (ret < 0) { + fclose(fp); + return NULL; + } + ret = PyObject_Print(NULL, fp, Py_PRINT_RAW); + if (ret < 0) { + fclose(fp); + return NULL; + } fclose(fp); - if (ret < 0) - return NULL; Py_RETURN_TRUE; """)]) assert module.dump(self.tmpname, None) - assert open(self.tmpname).read() == 'None' + assert open(self.tmpname).read() == 'None' def test_issue1970(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Mon Jul 10 15:58:22 2017 From: pypy.commits at gmail.com (mattip) Date: Mon, 10 Jul 2017 12:58:22 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-add_newdoc: merge default into branch Message-ID: <5963dc5e.4c3e1c0a.7496a.9aa4@mx.google.com> Author: Matti Picus Branch: cpyext-add_newdoc Changeset: r91851:47245401f0f1 Date: 2017-07-10 22:57 +0300 http://bitbucket.org/pypy/pypy/changeset/47245401f0f1/ Log: merge default into branch diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -10,7 +10,7 @@ from pypy.module.cpyext.api import ( CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, METH_STATIC, METH_VARARGS, PyObject, bootstrap_function, - 
build_type_checkers, cpython_api, generic_cpy_call, + build_type_checkers, cpython_api, generic_cpy_call, CANNOT_FAIL, PyTypeObjectPtr, slot_function, cts) from pypy.module.cpyext.pyobject import ( Py_DecRef, from_ref, make_ref, as_pyobj, make_typedescr) @@ -113,8 +113,14 @@ "built-in method '%s' of '%s' object" % (self.name, self.w_objclass.getname(self.space))) -PyCFunction_Check, PyCFunction_CheckExact = build_type_checkers( - "CFunction", W_PyCFunctionObject) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyCFunction_Check(space, w_obj): + from pypy.interpreter.function import BuiltinFunction + if w_obj is None: + return False + if isinstance(w_obj, W_PyCFunctionObject): + return True + return isinstance(w_obj, BuiltinFunction) class W_PyCClassMethodObject(W_PyCFunctionObject): w_self = None diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -93,6 +93,31 @@ assert mod.isSameFunction(mod.getarg_O) raises(SystemError, mod.isSameFunction, 1) + def test_check(self): + mod = self.import_extension('foo', [ + ('check', 'METH_O', + ''' + return PyLong_FromLong(PyCFunction_Check(args)); + '''), + ]) + from math import degrees + assert mod.check(degrees) == 1 + assert mod.check(list) == 0 + assert mod.check(sorted) == 1 + def func(): + pass + class A(object): + def meth(self): + pass + @staticmethod + def stat(): + pass + assert mod.check(func) == 0 + assert mod.check(A) == 0 + assert mod.check(A.meth) == 0 + assert mod.check(A.stat) == 0 + + class TestPyCMethodObject(BaseApiTest): def test_repr(self, space, api): """ From pypy.commits at gmail.com Mon Jul 10 15:58:20 2017 From: pypy.commits at gmail.com (mattip) Date: Mon, 10 Jul 2017 12:58:20 -0700 (PDT) Subject: [pypy-commit] pypy default: test, fix for PyCFunction_Check and builtins (there is no PyCFunction_CheckExact) Message-ID: 
<5963dc5c.cfad1c0a.60521.271b@mx.google.com> Author: Matti Picus Branch: Changeset: r91850:ac54af67d3c2 Date: 2017-07-10 22:56 +0300 http://bitbucket.org/pypy/pypy/changeset/ac54af67d3c2/ Log: test, fix for PyCFunction_Check and builtins (there is no PyCFunction_CheckExact) diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -10,7 +10,7 @@ from pypy.module.cpyext.api import ( CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, METH_STATIC, METH_VARARGS, PyObject, bootstrap_function, - build_type_checkers, cpython_api, generic_cpy_call, + build_type_checkers, cpython_api, generic_cpy_call, CANNOT_FAIL, PyTypeObjectPtr, slot_function, cts) from pypy.module.cpyext.pyobject import ( Py_DecRef, from_ref, make_ref, as_pyobj, make_typedescr) @@ -113,8 +113,14 @@ "built-in method '%s' of '%s' object" % (self.name, self.w_objclass.getname(self.space))) -PyCFunction_Check, PyCFunction_CheckExact = build_type_checkers( - "CFunction", W_PyCFunctionObject) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyCFunction_Check(space, w_obj): + from pypy.interpreter.function import BuiltinFunction + if w_obj is None: + return False + if isinstance(w_obj, W_PyCFunctionObject): + return True + return isinstance(w_obj, BuiltinFunction) class W_PyCClassMethodObject(W_PyCFunctionObject): w_self = None diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -93,6 +93,31 @@ assert mod.isSameFunction(mod.getarg_O) raises(SystemError, mod.isSameFunction, 1) + def test_check(self): + mod = self.import_extension('foo', [ + ('check', 'METH_O', + ''' + return PyLong_FromLong(PyCFunction_Check(args)); + '''), + ]) + from math import degrees + assert mod.check(degrees) == 1 + assert 
mod.check(list) == 0 + assert mod.check(sorted) == 1 + def func(): + pass + class A(object): + def meth(self): + pass + @staticmethod + def stat(): + pass + assert mod.check(func) == 0 + assert mod.check(A) == 0 + assert mod.check(A.meth) == 0 + assert mod.check(A.stat) == 0 + + class TestPyCMethodObject(BaseApiTest): def test_repr(self, space, api): """ From pypy.commits at gmail.com Mon Jul 10 18:53:53 2017 From: pypy.commits at gmail.com (antocuni) Date: Mon, 10 Jul 2017 15:53:53 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: skeleton for my talk Message-ID: <59640581.d61b1c0a.46c2b.a5eb@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5806:539d94abb88f Date: 2017-07-10 15:46 +0200 http://bitbucket.org/pypy/extradoc/changeset/539d94abb88f/ Log: skeleton for my talk diff --git a/talk/ep2017/the-joy-of-pypy-jit/Makefile b/talk/ep2017/the-joy-of-pypy-jit/Makefile new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/Makefile @@ -0,0 +1,18 @@ +# you can find rst2beamer.py here: +# http://codespeak.net/svn/user/antocuni/bin/rst2beamer.py + +# WARNING: to work, it needs this patch for docutils +# https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 + +talk.pdf: talk.rst author.latex title.latex stylesheet.latex + python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + #/home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + pdflatex talk.latex || exit + +view: talk.pdf + evince talk.pdf & + +xpdf: talk.pdf + xpdf talk.pdf & diff --git a/talk/ep2017/the-joy-of-pypy-jit/author.latex b/talk/ep2017/the-joy-of-pypy-jit/author.latex new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/author.latex @@ 
-0,0 +1,8 @@ +\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} + +\title[PyPy: abstractions for free]{PyPy: Abstractions for free} +\author[antocuni] +{Antonio Cuni} + +\institute{EuroPython 2017} +\date{July 12 2017} diff --git a/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt b/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt @@ -0,0 +1,111 @@ +.. colors +.. =========================== + +.. role:: green +.. role:: red + + +.. general useful commands +.. =========================== + +.. |pause| raw:: latex + + \pause + +.. |small| raw:: latex + + {\small + +.. |end_small| raw:: latex + + } + +.. |scriptsize| raw:: latex + + {\scriptsize + +.. |end_scriptsize| raw:: latex + + } + +.. |strike<| raw:: latex + + \sout{ + +.. closed bracket +.. =========================== + +.. |>| raw:: latex + + } + + +.. example block +.. =========================== + +.. |example<| raw:: latex + + \begin{exampleblock}{ + + +.. |end_example| raw:: latex + + \end{exampleblock} + + + +.. alert block +.. =========================== + +.. |alert<| raw:: latex + + \begin{alertblock}{ + + +.. |end_alert| raw:: latex + + \end{alertblock} + + + +.. columns +.. =========================== + +.. |column1| raw:: latex + + \begin{columns} + \begin{column}{0.45\textwidth} + +.. |column2| raw:: latex + + \end{column} + \begin{column}{0.45\textwidth} + + +.. |end_columns| raw:: latex + + \end{column} + \end{columns} + + + +.. |snake| image:: ../../img/py-web-new.png + :scale: 15% + + +.. |ok| image:: ok.png + :scale: 25% + + +.. nested blocks +.. =========================== + +.. |nested| raw:: latex + + \begin{columns} + \begin{column}{0.85\textwidth} + +.. 
|end_nested| raw:: latex + + \end{column} + \end{columns} diff --git a/talk/ep2017/the-joy-of-pypy-jit/stylesheet.latex b/talk/ep2017/the-joy-of-pypy-jit/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/stylesheet.latex @@ -0,0 +1,10 @@ +\usetheme{Boadilla} +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/ep2017/the-joy-of-pypy-jit/talk.pdf.info b/talk/ep2017/the-joy-of-pypy-jit/talk.pdf.info new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/talk.pdf.info @@ -0,0 +1,11 @@ +AvailableTransitions=[Crossfade] +TransitionDuration = 100 +EstimatedDuration = 45*60 # in seconds +MinutesOnly = True + +PageProps = { + 1: { + 'reset': FirstTimeOnly, + 'progress': False, + }, +} diff --git a/talk/ep2017/the-joy-of-pypy-jit/talk.rst b/talk/ep2017/the-joy-of-pypy-jit/talk.rst new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/talk.rst @@ -0,0 +1,18 @@ +.. include:: beamerdefs.txt + +========================================== +The joy of PyPy JIT: abstractions for free +========================================== + +About me +--------- + +- PyPy core dev + +- ``pdb++``, ``cffi``, ``vmprof``, ``capnpy``, ... 
+ +- Consultant, trainer + +- http://antocuni.eu + + From pypy.commits at gmail.com Mon Jul 10 18:53:55 2017 From: pypy.commits at gmail.com (antocuni) Date: Mon, 10 Jul 2017 15:53:55 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: more slides and import the sobel demo Message-ID: <59640583.c68b1c0a.5162d.fda1@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5807:468dc8f9a06b Date: 2017-07-10 16:20 +0200 http://bitbucket.org/pypy/extradoc/changeset/468dc8f9a06b/ Log: more slides and import the sobel demo diff --git a/talk/ep2017/the-joy-of-pypy-jit/Makefile b/talk/ep2017/the-joy-of-pypy-jit/Makefile --- a/talk/ep2017/the-joy-of-pypy-jit/Makefile +++ b/talk/ep2017/the-joy-of-pypy-jit/Makefile @@ -4,11 +4,10 @@ # WARNING: to work, it needs this patch for docutils # https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 -talk.pdf: talk.rst author.latex title.latex stylesheet.latex +talk.pdf: talk.rst author.latex stylesheet.latex python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit - #/home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit - #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit pdflatex talk.latex || exit view: talk.pdf diff --git a/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt b/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt --- a/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt +++ b/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt @@ -32,6 +32,11 @@ \sout{ +.. |br| raw:: latex + + \vspace{0.3cm} + + .. closed bracket .. 
=========================== diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel.png b/talk/ep2017/the-joy-of-pypy-jit/sobel.png new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..b038b17180409163400f447a45bc0e863364d131 GIT binary patch [cut] diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py @@ -0,0 +1,32 @@ +import time +import pypytools +from mplayer import mplayer +import v0, v1, v2, v3 + +def bench(): + if pypytools.IS_PYPY: + max_frames = 200 + else: + max_frames = 10 + + fn = 'test.avi -benchmark' + for v in (v0, v1, v2, v3): + start = time.time() + for i, img in enumerate(mplayer(fn)): + out = v.sobel(img) + if i == max_frames: + break + end = time.time() + fps = i / (end-start) + print '%s: %.2f fps' % (v.__name__, fps) + + +if __name__ == '__main__': + try: + import pypyjit + pypyjit.set_param(trace_limit=200000) + except ImportError: + pass + + bench() + diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +import sys +import errno +from time import time +from mplayer import mplayer, view +from math import sqrt +import array +import v0, v1, v2, v3 + +def main(argv): + if len(argv) > 1: + fn = argv[1] + else: + fn = 'test.avi -benchmark' #+ ' -vf scale=640:480' + + start = start0 = time() + for fcnt, img in enumerate(mplayer(fn)): + #out = v0.sobel(img) + #out = v1.sobel(img) + #out = v2.sobel(img) + out = v3.sobel(img) + + try: + view(out) + except IOError, e: + if e.errno != errno.EPIPE: + raise + print 'Exiting' + break + + print 1.0 / (time() - start), 'fps, ', (fcnt-2) / (time() - start0), 'average fps' + start = time() + if fcnt==2: + start0 = time() + +if __name__ == '__main__': + try: + import 
pypyjit + pypyjit.set_param(trace_limit=200000) + except ImportError: + pass + + main(sys.argv) diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/mplayer.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/mplayer.py new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/mplayer.py @@ -0,0 +1,48 @@ +import os, re, array +from subprocess import Popen, PIPE, STDOUT + + +def mplayer(fn='tv://', options=''): + f = os.popen('mplayer -really-quiet -noframedrop ' + options + ' ' + '-vo yuv4mpeg:file=/dev/stdout 2>/dev/null Author: Antonio Cuni Branch: extradoc Changeset: r5808:e409253e059c Date: 2017-07-10 16:29 +0200 http://bitbucket.org/pypy/extradoc/changeset/e409253e059c/ Log: make it possible to benchmark a single version; add more slides diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py --- a/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py @@ -1,16 +1,23 @@ +import sys import time import pypytools from mplayer import mplayer import v0, v1, v2, v3 def bench(): + if len(sys.argv) == 2: + v = sys.argv[1] + versions = [globals()[v]] + else: + versions = [v0, v1, v2, v3] + if pypytools.IS_PYPY: max_frames = 200 else: max_frames = 10 fn = 'test.avi -benchmark' - for v in (v0, v1, v2, v3): + for v in versions: start = time.time() for i, img in enumerate(mplayer(fn)): out = v.sobel(img) diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py --- a/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py @@ -16,10 +16,10 @@ start = start0 = time() for fcnt, img in enumerate(mplayer(fn)): - #out = v0.sobel(img) + out = v0.sobel(img) #out = v1.sobel(img) #out = v2.sobel(img) - out = v3.sobel(img) + #out = v3.sobel(img) try: view(out) diff --git a/talk/ep2017/the-joy-of-pypy-jit/talk.rst b/talk/ep2017/the-joy-of-pypy-jit/talk.rst --- 
a/talk/ep2017/the-joy-of-pypy-jit/talk.rst +++ b/talk/ep2017/the-joy-of-pypy-jit/talk.rst @@ -126,3 +126,45 @@ |end_scriptsize| +Version 0, demo +--------------- + +Demo + +|pause| + +PyPy is ~23x faster. Cool. + + +Version 1 +--------- + +|scriptsize| + +.. sourcecode:: python + + def get(img, x, y): + w, h, data = img + i = x + y*w + return data[i] + + def set(img, x, y, value): + w, h, data = img + i = x + y*w + data[i] = value + + def sobel(img): + w, h, data = img + out = w, h, array.array('B', [0]) * (w*h) + for y in xrange(1, h-1): + for x in xrange(1, w-1): + dx = (-1.0 * get(img, x-1, y-1) + + 1.0 * get(img, x+1, y-1) + + -2.0 * get(img, x-1, y) + + 2.0 * get(img, x+1, y) + + -1.0 * get(img, x-1, y+1) + + 1.0 * get(img, x+1, y+1)) + dy = ... + ... + +|end_scriptsize| From pypy.commits at gmail.com Mon Jul 10 18:53:58 2017 From: pypy.commits at gmail.com (antocuni) Date: Mon, 10 Jul 2017 15:53:58 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: more slides Message-ID: <59640586.c5b3df0a.9a5cd.88d8@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5809:e5b2630698e6 Date: 2017-07-10 16:37 +0200 http://bitbucket.org/pypy/extradoc/changeset/e5b2630698e6/ Log: more slides diff --git a/talk/ep2017/the-joy-of-pypy-jit/talk.rst b/talk/ep2017/the-joy-of-pypy-jit/talk.rst --- a/talk/ep2017/the-joy-of-pypy-jit/talk.rst +++ b/talk/ep2017/the-joy-of-pypy-jit/talk.rst @@ -168,3 +168,92 @@ ... |end_scriptsize| + + +Version 2 +--------- + +|scriptsize| + +.. sourcecode:: python + + class Image(object): + + def __init__(self, width, height, data=None): + self.width = width + self.height = height + if data is None: + self.data = array.array('B', [0]) * (width*height) + else: + self.data = data + + def __getitem__(self, idx): + x, y = idx + return self.data[x + y*self.width] + + def __setitem__(self, idx, value): + x, y = idx + self.data[x + y*self.width] = value + +|end_scriptsize| + + +Version 3 +------------- + +|scriptsize| + +.. 
sourcecode:: python + + _Point = namedtuple('_Point', ['x', 'y']) + class Point(_Point): + def __add__(self, other): + ox, oy = other + x = self.x + ox + y = self.y + oy + return self.__class__(x, y) + + class ImageIter(object): + def __init__(self, x0, x1, y0, y1): + self.it = itertools.product(xrange(x0, x1), xrange(y0, y1)) + def __iter__(self): + return self + def next(self): + x, y = next(self.it) + return Point(x, y) + + class Image(v2.Image): + def noborder(self): + return ImageIter(1, self.width-1, 1, self.height-1) + +|end_scriptsize| + +Version 3 +------------- + +|scriptsize| + +.. sourcecode:: python + + def sobel(img): + img = Image(*img) + out = Image(img.width, img.height) + for p in img.noborder(): + dx = (-1.0 * img[p + (-1,-1)] + + 1.0 * img[p + ( 1,-1)] + + -2.0 * img[p + (-1, 0)] + + 2.0 * img[p + ( 1, 0)] + + -1.0 * img[p + (-1, 1)] + + 1.0 * img[p + ( 1, 1)]) + + dy = (-1.0 * img[p + (-1,-1)] + + -2.0 * img[p + ( 0,-1)] + + -1.0 * img[p + ( 1,-1)] + + 1.0 * img[p + (-1, 1)] + + 2.0 * img[p + ( 0, 1)] + + 1.0 * img[p + ( 1, 1)]) + + value = min(int(sqrt(dx*dx + dy*dy) / 2.0), 255) + out[p] = value + +|end_scriptsize| From pypy.commits at gmail.com Mon Jul 10 18:54:00 2017 From: pypy.commits at gmail.com (antocuni) Date: Mon, 10 Jul 2017 15:54:00 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: generate some charts and show them Message-ID: <59640588.ccb21c0a.84823.ac7a@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5810:be6b6ec88341 Date: 2017-07-11 00:38 +0200 http://bitbucket.org/pypy/extradoc/changeset/be6b6ec88341/ Log: generate some charts and show them diff --git a/talk/ep2017/the-joy-of-pypy-jit/Makefile b/talk/ep2017/the-joy-of-pypy-jit/Makefile --- a/talk/ep2017/the-joy-of-pypy-jit/Makefile +++ b/talk/ep2017/the-joy-of-pypy-jit/Makefile @@ -4,12 +4,15 @@ # WARNING: to work, it needs this patch for docutils # https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 -talk.pdf: talk.rst 
author.latex stylesheet.latex +talk.pdf: talk.rst author.latex stylesheet.latex sobel/CPython-v0.png python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit pdflatex talk.latex || exit +sobel/CPython-v0.png: sobel/chart.py + cd sobel && python chart.py + view: talk.pdf evince talk.pdf & diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/chart.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/chart.py new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/chart.py @@ -0,0 +1,35 @@ +import numpy as np +import matplotlib.pyplot as plt + +N = 5 +CPYTHON = (4.99, 2.85, 1.62, 0.59) +PYPY = (288.67, 278.25, 276.81, 235.91) + +def draw(title, values, allvalues, color): + filename = '%s-v%d.png' % (title, len(values)-1) + ylim = max(allvalues) * 1.20 + labels = ['v%d' % i for i in range(len(values))] + extra = len(allvalues) - len(values) + values = values + (0,)*extra + labels = labels + ['']*extra + + ind = np.arange(len(values)) + width = 0.35 # the width of the bars + + fig, ax = plt.subplots() + ax.bar(ind, values, width, color=color) + ax.set_ylabel('fps') + ax.set_title(title + ' FPS') + ax.set_xticks(ind + width / 2) + ax.set_xticklabels(labels) + ax.set_ylim((0, ylim)) + print filename + plt.savefig(filename) + +for i in range(1, len(CPYTHON)+1): + draw('PyPy', PYPY[:i], PYPY, color='r') + draw('CPython', CPYTHON[:i], CPYTHON, color='b') + + +## for i, (cpy, pypy) in enumerate(zip(CPYTHON, PYPY)): +## print i, pypy/cpy diff --git a/talk/ep2017/the-joy-of-pypy-jit/talk.rst b/talk/ep2017/the-joy-of-pypy-jit/talk.rst --- a/talk/ep2017/the-joy-of-pypy-jit/talk.rst +++ b/talk/ep2017/the-joy-of-pypy-jit/talk.rst @@ -126,14 +126,29 @@ |end_scriptsize| -Version 0, demo +Version 0 --------------- Demo -|pause| -PyPy is ~23x faster. Cool. + +Version 0 +---------- + +|column1| + +.. image:: sobel/CPython-v0.png + :scale: 30% + +|column2| + +.. 
image:: sobel/PyPy-v0.png + :scale: 30% + +|end_columns| + +* PyPy is ~59x faster Version 1 @@ -169,6 +184,22 @@ |end_scriptsize| +Version 1 +---------- + +|column1| + +.. image:: sobel/CPython-v1.png + :scale: 30% + +|column2| + +.. image:: sobel/PyPy-v1.png + :scale: 30% + +|end_columns| + +* PyPy is ~97x faster Version 2 --------- @@ -197,6 +228,22 @@ |end_scriptsize| +Version 2 +---------- + +|column1| + +.. image:: sobel/CPython-v2.png + :scale: 30% + +|column2| + +.. image:: sobel/PyPy-v2.png + :scale: 30% + +|end_columns| + +* PyPy is ~170x faster Version 3 ------------- @@ -257,3 +304,21 @@ out[p] = value |end_scriptsize| + +Version 3 +---------- + +|column1| + +.. image:: sobel/CPython-v3.png + :scale: 30% + +|column2| + +.. image:: sobel/PyPy-v3.png + :scale: 30% + +|end_columns| + +* PyPy is ~400x faster + From pypy.commits at gmail.com Tue Jul 11 06:41:04 2017 From: pypy.commits at gmail.com (antocuni) Date: Tue, 11 Jul 2017 03:41:04 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: more slides Message-ID: <5964ab40.4eec1c0a.7ba0b.c4e0@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5811:853481996a2f Date: 2017-07-11 12:40 +0200 http://bitbucket.org/pypy/extradoc/changeset/853481996a2f/ Log: more slides diff --git a/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt b/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt --- a/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt +++ b/talk/ep2017/the-joy-of-pypy-jit/beamerdefs.txt @@ -28,6 +28,14 @@ } +.. |tiny| raw:: latex + + {\tiny + +.. |end_tiny| raw:: latex + + } + .. |strike<| raw:: latex \sout{ diff --git a/talk/ep2017/the-joy-of-pypy-jit/talk.rst b/talk/ep2017/the-joy-of-pypy-jit/talk.rst --- a/talk/ep2017/the-joy-of-pypy-jit/talk.rst +++ b/talk/ep2017/the-joy-of-pypy-jit/talk.rst @@ -67,8 +67,10 @@ - etc. - PRO: readability - -- CON: speed? 
+ +|pause| + +- CON: **cost of abstraction?** Example: Sobel filter @@ -86,11 +88,11 @@ - greyscale -- `w`, `h` +- ``w, h, data`` -- `array.array('B')` of `w * h` bytes +- ``data = array.array('B')`` of ``w * h`` bytes -- pixel `(x, y)` at index `x + w*y` +- pixel ``(x, y)`` at index ``x + w*y`` Version 0 @@ -322,3 +324,287 @@ * PyPy is ~400x faster + +The cost of abstraction +------------------------ + +* CPython + + - each version ~2-3x slower than the previous one + + - v3 is ~8.5x slower than v0 + +* PyPy + + - abstractions (almost) for free + + - v3 is ~20% slower than v0, v1, v2 + + +PyPy JIT 101 +------------- + +* What is the JIT doing? + +* Which code is optimized away? + + +Loops and guards +----------------- + +|tiny| +|column1| + +.. sourcecode:: python + + def compute(n): + total = 0 + i = 0 + while i < n: + total += i + i += 1 + return total + +|pause| +|column2| + +.. sourcecode:: python + + cdef loop0(i, n, total): + assert isinstance(n, int) + while True: + assert i < n + total = int_add_ovf(total, i) + assert not_overflow(total) + i = int_add_ovf(i, 1) + assert not_overflow(i) + +|end_columns| +|end_tiny| + +Bridges (1) +----------------- + +|tiny| +|column1| + +.. sourcecode:: python + + def compute(n): + total = 0 + i = 0 + while i < n: + if i % 2: + total += i + else: + total += (i-5) + i += 1 + return total + +|pause| +|column2| + +.. sourcecode:: python + + cdef loop0(i, n, total): + assert isinstance(n, int) + while True: + assert i < n + assert i % 2 != 0 + total = int_add_ovf(total, i) + assert not_overflow(total) + i = int_add_ovf(i, 1) + assert not_overflow(i) + +|end_columns| +|end_tiny| + +Bridges (2) +----------------- + +|tiny| +|column1| + +.. sourcecode:: python + + def compute(n): + total = 0 + i = 0 + while i < n: + if i % 2: + total += i + else: + total += (i-5) + i += 1 + return total + +|column2| + +.. 
sourcecode:: python + + cdef loop0(i, n, total): + assert isinstance(n, int) + while True: + assert i < n + if i % 2 != 0: + total = int_add_ovf(total, i) + assert not_overflow(total) + i = int_add_ovf(i, 1) + assert not_overflow(i) + else: + tmp = int_sub_ovf(i, 5) + assert not_overflow(tmp) + total = int_add_ovf(total, tmp) + i = int_add_ovf(i, 1) + assert not_overflow(i) + +|end_columns| +|end_tiny| + + +Inlining +----------------- + +|tiny| +|column1| + +.. sourcecode:: python + + def fn(a, b): + return a + b + + def compute(n): + total = 0 + i = 0 + while i < n: + total = fn(total, i) + i += 1 + return total + +|column2| + +.. sourcecode:: python + + assert version(globals()) == 42 + assert id(fn.__code__) == 0x1234 + # + assert isinstance(n, int) + while True: + assert i < n + total = int_add_ovf(total, i) # inlined! + assert not_overflow(total) + i = int_add_ovf(i, 1) + assert not_overflow(i) + +|end_columns| +|end_tiny| + + +Classes +----------------- + +|tiny| +|column1| + +.. sourcecode:: python + + import math + class Point(object): + def __init__(self, x, y): + self.x = x + self.y = y + + def distance(self): + return math.hypot(self.x, self.y) + + def compute(points): + total = 0 + for p in points: + total += p.distance() + return total + +|column2| + +.. sourcecode:: python + + cdef loop0(total, list_iter): + assert version(globals()) == 42 + assert version(math.__dict__) == 23 + assert version(Point.__dict__) == 56 + assert id(Point.distance.__globals__) == 0x1234 + assert version(Point.distance.__globals__) == 78 + assert id(Point.distance.__code__) == 0x5678 + + while True: + p = next(list_iter) + assert isinstance(p, Point) + # + assert isinstance(p.x, float) + assert isinstance(p.y, float) + p_x = p.x + p_y = p.y + tmp = c_call(math.hypot, p_x, p_y) + # + total = float_add(total, tmp) + +|end_columns| +|end_tiny| + + +Virtuals +----------------- + +|tiny| +|column1| + +.. 
sourcecode:: python + + def compute(n): + total = 0.0 + i = 0.0 + while i < n: + p = Point(i, i+1) + total += p.distance() + i += 1 + return total + +|pause| +|column2| + +.. sourcecode:: python + + assert ... + assert isinstance(n, int) + assert isinstance(i, float) + while True: + assert i < n + # Point() is "virtualized" into p_x and p_y + p_x = i + p_y = float_add(i, 1.0) + # + # inlined call to Point.hypot + tmp = c_call(math.hypot, p_x, p_y) + total = float_add(total, tmp) + +|end_columns| +|end_tiny| + + + +More PyPy at EuroPython +------------------------ + +* PyPy Help Desk + + - Tomorrow, 10:30-12:00 and 14:00-15:30 + + - Come and ask us questions! + +* "PyPy meets Python 3 and numpy" + + - Armin Rigo + + - Friday, 14:00 + +* Or, just talk to us :) + From pypy.commits at gmail.com Tue Jul 11 08:41:22 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 05:41:22 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: (fijal, arigo) add a failing test Message-ID: <5964c772.898c1c0a.f20f3.a713@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91853:98cf831a0ac4 Date: 2017-07-11 14:03 +0200 http://bitbucket.org/pypy/pypy/changeset/98cf831a0ac4/ Log: (fijal, arigo) add a failing test diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -1221,6 +1221,43 @@ py.test.fail("none of the stack sizes worked") + def test_thread_and_gc_simple(self): + import time, gc + from rpython.rlib import rthread, rposix + + def bootstrap(): + rthread.gc_thread_start() + gc.collect() + rthread.gc_thread_die() + + def new_thread(): + ident = rthread.start_new_thread(bootstrap, ()) + return ident + + def entry_point(argv): + # start 5 new threads + new_thread() + new_thread() + # + gc.collect() + # + new_thread() + new_thread() + new_thread() + time.sleep(0.5) + os.write(1, "ok\n") + return 0 + + def 
runme(no__thread): + t, cbuilder = self.compile(entry_point, no__thread=no__thread) + data = cbuilder.cmdexec('') + assert data == 'ok\n' + + if SUPPORT__THREAD: + runme(no__thread=False) + runme(no__thread=True) + + def test_thread_and_gc(self): import time, gc from rpython.rlib import rthread, rposix From pypy.commits at gmail.com Tue Jul 11 08:41:20 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 05:41:20 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: bump OS X version Message-ID: <5964c770.c990df0a.46d1b.3bf0@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91852:da80b5357f98 Date: 2017-07-11 14:03 +0200 http://bitbucket.org/pypy/pypy/changeset/da80b5357f98/ Log: bump OS X version diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -9,7 +9,7 @@ # since 10.5, so we use that as minimum requirement. Bumped to 10.6 # because 10.11 does not ship with 10.5 versions of libs # -DARWIN_VERSION_MIN = '-mmacosx-version-min=10.6' +DARWIN_VERSION_MIN = '-mmacosx-version-min=10.7' class Darwin(posix.BasePosix): name = "darwin" From pypy.commits at gmail.com Tue Jul 11 08:41:24 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 05:41:24 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: (fijal, arigo) minimal hacks on OS X to make the test pass Message-ID: <5964c774.028b1c0a.456ae.06ce@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91854:30bf6b54473f Date: 2017-07-11 14:26 +0200 http://bitbucket.org/pypy/pypy/changeset/30bf6b54473f/ Log: (fijal, arigo) minimal hacks on OS X to make the test pass diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -25,9 +25,9 @@ IS_64_BITS = sys.maxint > 2147483647 SUPPORT__THREAD = ( # whether the particular C 
compiler supports __thread - sys.platform.startswith("linux")) # Linux works - # OS/X doesn't work, because we still target 10.5/10.6 and the - # minimum required version is 10.7. Windows doesn't work. Please + sys.platform.startswith("linux") or sys.platform == 'darwin') + # Linux and OS/X works. + # Windows doesn't work. Please # add other platforms here if it works on them. MAINDIR = os.path.dirname(os.path.dirname(__file__)) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -211,7 +211,7 @@ /* ------------------------------------------------------------ */ -/* #ifdef USE___THREAD XXX */ +#ifdef USE___THREAD /* ------------------------------------------------------------ */ @@ -239,8 +239,9 @@ /* /\* ------------------------------------------------------------ *\/ */ -/* #else */ + #else /* /\* ------------------------------------------------------------ *\/ */ +#error "redo this part" /* /\* this is the case where the 'struct pypy_threadlocal_s' is allocated */ @@ -272,5 +273,5 @@ /* /\* ------------------------------------------------------------ *\/ */ -/* #endif */ +#endif /* /\* ------------------------------------------------------------ *\/ */ diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -1253,9 +1253,8 @@ data = cbuilder.cmdexec('') assert data == 'ok\n' - if SUPPORT__THREAD: - runme(no__thread=False) - runme(no__thread=True) + assert SUPPORT__THREAD + runme(no__thread=False) def test_thread_and_gc(self): From pypy.commits at gmail.com Tue Jul 11 08:41:26 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 05:41:26 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: add another test Message-ID: <5964c776.0298df0a.7257e.92d2@mx.google.com> 
Author: fijal Branch: nogil-unsafe-2 Changeset: r91855:7f09446cd7df Date: 2017-07-11 14:40 +0200 http://bitbucket.org/pypy/pypy/changeset/7f09446cd7df/ Log: add another test diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -159,7 +159,7 @@ def __init__(self, ll_lock): self._lock = ll_lock - def acquire(self, flag): + def acquire(self, flag=True): if flag: c_thread_acquirelock(self._lock, 1) return True diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -1256,8 +1256,71 @@ assert SUPPORT__THREAD runme(no__thread=False) + def test_thread_and_gc_medium(self): + import time, gc + from rpython.rlib import rthread, rposix - def test_thread_and_gc(self): + class State(object): + def _init_(self): + self._lock = rthread.allocate_lock() + self.counter = 0 + self.l = [] + + def append(self, item): + self._lock.acquire() + self.l.append(item) + self._lock.release() + + def get_counter(self): + self._lock.acquire() + r = self.counter + self.counter += 1 + self._lock.release() + return r + + state = State() + + class Node(object): + def __init__(self, c): + self.c = c + + def bootstrap(): + rthread.gc_thread_start() + state.append(Node(state.get_counter())) + rthread.gc_thread_die() + + def new_thread(): + ident = rthread.start_new_thread(bootstrap, ()) + return ident + + def entry_point(argv): + # start 5 new threads + state._init_() + new_thread() + new_thread() + # + gc.collect() + # + new_thread() + new_thread() + new_thread() + time.sleep(0.5) + for item in state.l: + os.write(1, str(item.c) + "\n") + os.write(1, "ok\n") + return 0 + + def runme(no__thread): + t, cbuilder = self.compile(entry_point, no__thread=no__thread) + data = cbuilder.cmdexec('') + r = data.splitlines() + r.sort() + assert r == ['0', '1', '2', '3', '4', 'ok'] + + assert 
SUPPORT__THREAD + runme(no__thread=False) + + def test_thread_and_gc_large(self): import time, gc from rpython.rlib import rthread, rposix from rpython.rtyper.lltypesystem import lltype From pypy.commits at gmail.com Tue Jul 11 08:48:13 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 05:48:13 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: (fijal, arigo) disable all the gil stuff Message-ID: <5964c90d.d89bdf0a.a10cf.83f5@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91856:cc04210a4bfd Date: 2017-07-11 14:47 +0200 http://bitbucket.org/pypy/pypy/changeset/cc04210a4bfd/ Log: (fijal, arigo) disable all the gil stuff diff --git a/rpython/rlib/rgil.py b/rpython/rlib/rgil.py --- a/rpython/rlib/rgil.py +++ b/rpython/rlib/rgil.py @@ -122,19 +122,21 @@ _gil_allocate() def release(): + return # this function must not raise, in such a way that the exception # transformer knows that it cannot raise! _gil_release() -release._gctransformer_hint_cannot_collect_ = True -release._dont_reach_me_in_del_ = True +#release._gctransformer_hint_cannot_collect_ = True +#release._dont_reach_me_in_del_ = True def acquire(): + return from rpython.rlib import rthread _gil_acquire() rthread.gc_thread_run() _after_thread_switch() -acquire._gctransformer_hint_cannot_collect_ = True -acquire._dont_reach_me_in_del_ = True +#acquire._gctransformer_hint_cannot_collect_ = True +#acquire._dont_reach_me_in_del_ = True # The _gctransformer_hint_cannot_collect_ hack is needed for # translations in which the *_external_call() functions are not inlined. 
@@ -151,10 +153,11 @@ # from rpython.rlib import rthread # rthread.gc_thread_run() # _after_thread_switch() - _gil_yield_thread() -yield_thread._gctransformer_hint_close_stack_ = True -yield_thread._dont_reach_me_in_del_ = True -yield_thread._dont_inline_ = True + return + #_gil_yield_thread() +#yield_thread._gctransformer_hint_close_stack_ = True +#yield_thread._dont_reach_me_in_del_ = True +#yield_thread._dont_inline_ = True # yield_thread() needs a different hint: _gctransformer_hint_close_stack_. # The *_external_call() functions are themselves called only from the rffi From pypy.commits at gmail.com Tue Jul 11 08:58:25 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 05:58:25 -0700 (PDT) Subject: [pypy-commit] pypy default: essential fix Message-ID: <5964cb71.05b61c0a.a2f02.cde3@mx.google.com> Author: fijal Branch: Changeset: r91857:d135217574a6 Date: 2017-07-11 14:57 +0200 http://bitbucket.org/pypy/pypy/changeset/d135217574a6/ Log: essential fix diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -639,7 +639,10 @@ addresses_of_static_ptrs = ( self.layoutbuilder.addresses_of_static_ptrs_in_nongc + self.layoutbuilder.addresses_of_static_ptrs) - log.info("found %s static roots" % (len(addresses_of_static_ptrs), )) + if len(addresses_of_static_ptrs) == 1: + log.info("found 1 static root") + else: + log.info("found %s static roots" % (len(addresses_of_static_ptrs), )) ll_static_roots_inside = lltype.malloc(lltype.Array(llmemory.Address), len(addresses_of_static_ptrs), immortal=True) From pypy.commits at gmail.com Tue Jul 11 09:09:51 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 06:09:51 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: make the test crash Message-ID: <5964ce1f.cd3f1c0a.95c6.7528@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91858:d160e8df8534 Date: 
2017-07-11 15:06 +0200 http://bitbucket.org/pypy/pypy/changeset/d160e8df8534/ Log: make the test crash diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -1270,6 +1270,7 @@ self._lock.acquire() self.l.append(item) self._lock.release() + append._dont_inline_ = True def get_counter(self): self._lock.acquire() @@ -1286,7 +1287,9 @@ def bootstrap(): rthread.gc_thread_start() - state.append(Node(state.get_counter())) + x = Node(state.get_counter()) + state.append(x) + state.append(x) rthread.gc_thread_die() def new_thread(): @@ -1315,7 +1318,7 @@ data = cbuilder.cmdexec('') r = data.splitlines() r.sort() - assert r == ['0', '1', '2', '3', '4', 'ok'] + assert r == ['0', '0', '1', '1', '2', '2', '3', '3', '4', '4', 'ok'] assert SUPPORT__THREAD runme(no__thread=False) From pypy.commits at gmail.com Tue Jul 11 09:10:01 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 06:10:01 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: merge default Message-ID: <5964ce29.02d91c0a.f8db0.5ff0@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91859:730ecebd4700 Date: 2017-07-11 15:09 +0200 http://bitbucket.org/pypy/pypy/changeset/730ecebd4700/ Log: merge default diff too long, truncating to 2000 out of 69054 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -1,6 +1,6 @@ syntax: glob *.py[co] -*.sw[po] +*.sw[pon] *~ .*.swp .idea @@ -8,6 +8,8 @@ .pydevproject __pycache__ +.cache/ +.gdb_history syntax: regexp ^testresult$ ^site-packages$ @@ -49,6 +51,11 @@ ^rpython/translator/goal/target.+-c$ ^rpython/translator/goal/.+\.exe$ ^rpython/translator/goal/.+\.dll$ +^rpython/rlib/rvmprof/src/shared/libbacktrace/Makefile$ +^rpython/rlib/rvmprof/src/shared/libbacktrace/config.guess$ +^rpython/rlib/rvmprof/src/shared/libbacktrace/config.h$ 
+^rpython/rlib/rvmprof/src/shared/libbacktrace/config.log$ +^rpython/rlib/rvmprof/src/shared/libbacktrace/config.status$ ^pypy/goal/pypy-translation-snapshot$ ^pypy/goal/pypy-c ^pypy/goal/.+\.exe$ @@ -82,3 +89,5 @@ ^rpython/_cache$ pypy/module/cppyy/.+/*\.pcm + + diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -34,3 +34,9 @@ 050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1 aff251e543859ce4508159dd9f1a82a2f553de00 release-pypy2.7-v5.6.0 +fa3249d55d15b9829e1be69cdf45b5a44cec902d release-pypy2.7-v5.7.0 +b16a4363e930f6401bceb499b9520955504c6cb0 release-pypy3.5-v5.7.0 +1aa2d8e03cdfab54b7121e93fda7e98ea88a30bf release-pypy2.7-v5.7.1 +2875f328eae2216a87f3d6f335092832eb031f56 release-pypy3.5-v5.7.1 +c925e73810367cd960a32592dd7f728f436c125c release-pypy2.7-v5.8.0 +a37ecfe5f142bc971a86d17305cc5d1d70abec64 release-pypy3.5-v5.8.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -1,3 +1,5 @@ +#encoding utf-8 + License ======= @@ -37,14 +39,14 @@ Armin Rigo Maciej Fijalkowski - Carl Friedrich Bolz + Carl Friedrich Bolz-Tereick Amaury Forgeot d'Arc Antonio Cuni + Matti Picus Samuele Pedroni - Matti Picus + Ronan Lamy Alex Gaynor Philip Jenvey - Ronan Lamy Brian Kearns Richard Plangger Michael Hudson @@ -55,12 +57,12 @@ Hakan Ardo Benjamin Peterson Anders Chrigstrom + Wim Lavrijsen Eric van Riet Paap - Wim Lavrijsen Richard Emslie Alexander Schremmer + Remi Meier Dan Villiom Podlaski Christiansen - Remi Meier Lukas Diekmann Sven Hager Anders Lehmann @@ -83,8 +85,8 @@ Lawrence Oluyede Bartosz Skowron Daniel Roberts + Adrien Di Mascio Niko Matsakis - Adrien Di Mascio Alexander Hesse Ludovic Aubry Jacob Hallen @@ -99,278 +101,288 @@ Vincent Legoll Michael Foord Stephan Diehl + Stefano Rivera Stefan Schwarzer + Tomek Meka Valentino Volonghi - Tomek Meka - Stefano Rivera Patrick Maupin Devin Jeanpierre Bob Ippolito Bruno Gola David Malcolm Jean-Paul Calderone + Squeaky + Edd 
Barrett Timo Paulssen - Edd Barrett - Squeaky Marius Gedminas Alexandre Fayolle Simon Burton + Nicolas Truessel Martin Matusiak - Nicolas Truessel + Laurence Tratt + Wenzhu Man Konstantin Lopuhin - Wenzhu Man John Witulski - Laurence Tratt + Greg Price Ivan Sichmann Freitas - Greg Price Dario Bertini + Jeremy Thurgood Mark Pearse Simon Cross - Jeremy Thurgood + Tobias Pape Andreas Stührk - Tobias Pape Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov Paweł Piotr Przeradowski + William Leslie + marky1991 + Ilya Osadchiy + Tobias Oberstein Paul deGrandis - Ilya Osadchiy - marky1991 - Tobias Oberstein + Boris Feigin + Taavi Burns Adrian Kuhn - Boris Feigin tav - Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen Wanja Saatkamp + Mike Blume + Joannah Nanjekye Gerald Klix - Mike Blume Oscar Nierstrasz + Rami Chowdhury Stefan H. Muller - Rami Chowdhury + Tim Felgentreff Eugene Oden + Jeff Terrace Henry Mason Vasily Kuznetsov Preston Timmons David Ripton - Jeff Terrace - Tim Felgentreff Dusty Phillips Lukas Renggli Guenter Jantzen - William Leslie + Jasper Schulz Ned Batchelder + Amit Regmi Anton Gulenko - Amit Regmi - Ben Young - Jasper Schulz + Sergey Matyunin + Andrew Chambers Nicolas Chauvat Andrew Durdin - Andrew Chambers - Sergey Matyunin + Ben Young Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira Yichao Yu + Michael Twomey Rocco Moretti Gintautas Miliauskas - Michael Twomey Lucian Branescu Mihaila anatoly techtonik + Dodan Mihai + Karl Bartel Gabriel Lavoie + Jared Grubb Olivier Dormond - Jared Grubb - Karl Bartel Wouter van Heyst + Sebastian Pawluś Brian Dorsey Victor Stinner Andrews Medina - Sebastian Pawluś - Stuart Williams - Daniel Patrick Aaron Iles Toby Watson + Daniel Patrick + Stuart Williams Antoine Pitrou Christian Hudon + Justas Sadzevicius + Neil Shepperd Michael Cheng - Justas Sadzevicius + Mikael Schönenberg + Stanislaw Halik + Berkin Ilbeyi Gasper Zejn - Neil Shepperd - Stanislaw Halik - Mikael Schönenberg - Berkin 
Ilbeyi Faye Zhao Elmo Mäntynen - Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani + Jonathan David Riehl Beatrice During Alex Perry + p_zieschang at yahoo.de + Robert Zaremba + Alan McIntyre + Alexander Sedov Vaibhav Sood - Alan McIntyre Reuben Cummings - Alexander Sedov - p_zieschang at yahoo.de Attila Gobi + Alecsandru Patrascu Christopher Pope - Aaron Gallagher + Tristan Arthur + Christian Tismer + Dan Stromberg + Carl Meyer Florin Papa - Christian Tismer - Marc Abramowitz - Dan Stromberg - Arjun Naik + Jens-Uwe Mager Valentina Mukhamedzhanova Stefano Parmesan touilleMan + Marc Abramowitz + Arjun Naik + Aaron Gallagher Alexis Daboville - Jens-Uwe Mager - Carl Meyer + Pieter Zieschang Karl Ramm - Pieter Zieschang - Gabriel Lukas Vacek - Kunal Grover - Andrew Dalke + Omer Katz + Jacek Generowicz Sylvain Thenault Jakub Stasiak + Stefan Beyer + Andrew Dalke + Alejandro J. Cura + Vladimir Kryachko + Gabriel + Mark Williams + Kunal Grover Nathan Taylor - Vladimir Kryachko - Omer Katz - Mark Williams - Jacek Generowicz - Alejandro J. 
Cura + Travis Francis Athougies + Yasir Suhail + Sergey Kishchenko + Martin Blais + Lutz Paelike + Ian Foote + Philipp Rustemeuer + Catalin Gabriel Manciu Jacob Oscarson - Travis Francis Athougies Ryan Gonzalez - Ian Foote Kristjan Valur Jonsson + Lucio Torre + Richard Lancaster + Dan Buch + Lene Wagner + Tomo Cocoa David Lievens Neil Blakey-Milner - Lutz Paelike - Lucio Torre + Henrik Vendelbo Lars Wassermann - Philipp Rustemeuer - Henrik Vendelbo - Richard Lancaster - Yasir Suhail - Dan Buch + Ignas Mikalajunas + Christoph Gerum Miguel de Val Borro Artur Lisiecki - Sergey Kishchenko - Ignas Mikalajunas - Alecsandru Patrascu - Christoph Gerum - Martin Blais - Lene Wagner - Catalin Gabriel Manciu - Tomo Cocoa - Kim Jin Su - rafalgalczynski at gmail.com Toni Mattis - Amber Brown + Laurens Van Houtven + Bobby Impollonia + Roberto De Ioris + Jeong YunWon + Christopher Armstrong + Aaron Tubbs + Vasantha Ganesh K + Jason Michalski + Markus Holtermann + Andrew Thompson + Yusei Tahara + Ruochen Huang + Fabio Niephaus + Akira Li + Gustavo Niemeyer + Rafał Gałczyński + Logan Chien Lucas Stadler - Julian Berman - Markus Holtermann roberto at goyle + Matt Bogosian Yury V. 
Zaytsev - Anna Katrina Dominguez - Bobby Impollonia - Vasantha Ganesh K - Andrew Thompson florinpapa - Yusei Tahara - Aaron Tubbs - Ben Darnell - Roberto De Ioris - Logan Chien - Juan Francisco Cantero Hurtado - Ruochen Huang - Jeong YunWon - Godefroid Chappelle - Joshua Gilbert - Dan Colish - Christopher Armstrong - Michael Hudson-Doyle Anders Sigfridsson Nikolay Zinov - Jason Michalski + rafalgalczynski at gmail.com + Joshua Gilbert + Anna Katrina Dominguez + Kim Jin Su + Amber Brown + Nate Bragg + Ben Darnell + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Julian Berman + Michael Hudson-Doyle Floris Bruynooghe - Laurens Van Houtven - Akira Li - Gustavo Niemeyer Stephan Busemann - Rafał Gałczyński - Matt Bogosian + Dan Colish timo - Christian Muirhead - Berker Peksag - James Lan Volodymyr Vladymyrov - shoma hosaka - Ben Mather - Niclas Olofsson - Matthew Miller - Rodrigo Araújo + Daniel Neuhäuser + Flavio Percoco halgari - Boglarka Vezer - Chris Pressey - Buck Golemon - Diana Popa - Konrad Delong - Dinu Gherman + Jim Baker Chris Lambacher coolbutuseless at gmail.com + Mike Bayer + Rodrigo Araújo Daniil Yarancev - Jim Baker + OlivierBlanvillain + Jonas Pfannschmidt + Zearin + Andrey Churin Dan Crosta - Nikolaos-Digenis Karagiannis - James Robert - Armin Ronacher - Brett Cannon - Donald Stufft - yrttyr - aliceinwire - OlivierBlanvillain - Dan Sanders - Zooko Wilcox-O Hearn + reubano at gmail.com + Julien Phalip + Roman Podoliaka + Eli Stevens + Boglarka Vezer + PavloKapyshin Tomer Chachamu Christopher Groskopf Asmo Soinio - jiaaro - Mads Kiilerich Antony Lee - Jason Madden - Daniel Neuh�user - reubano at gmail.com - Yaroslav Fedevych Jim Hunziker - Markus Unterwaditzer - Even Wiik Thomassen - jbs - squeaky - soareschen - Jonas Pfannschmidt - Kurt Griffiths - Mike Bayer - Stefan Marr - Flavio Percoco - Kristoffer Kleine + shoma hosaka + Buck Golemon + Iraklis D. 
+ JohnDoe + yrttyr Michael Chermside Anna Ravencroft + remarkablerocket + Petre Vijiac + Berker Peksag + Christian Muirhead + soareschen + Matthew Miller + Konrad Delong + Dinu Gherman pizi - remarkablerocket - Andrey Churin - Zearin - Eli Stevens - Tobias Diaz - Julien Phalip - Roman Podoliaka + James Robert + Armin Ronacher + Diana Popa + Mads Kiilerich + Brett Cannon + aliceinwire + Zooko Wilcox-O Hearn + James Lan + jiaaro + Markus Unterwaditzer + Kristoffer Kleine + Graham Markall Dan Loewenherz werat + Niclas Olofsson + Chris Pressey + Tobias Diaz + Nikolaos-Digenis Karagiannis + Kurt Griffiths + Ben Mather + Donald Stufft + Dan Sanders + Jason Madden + Yaroslav Fedevych + Even Wiik Thomassen + Stefan Marr Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -27,14 +27,19 @@ Building ======== -build with: +First switch to or download the correct branch. The basic choices are +``default`` for Python 2.7 and, for Python 3.X, the corresponding py3.X +branch (e.g. ``py3.5``). + +Build with: .. code-block:: console $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py -This ends up with ``pypy-c`` binary in the main pypy directory. We suggest -to use virtualenv with the resulting pypy-c as the interpreter; you can -find more details about various installation schemes here: +This ends up with a ``pypy-c`` or ``pypy3-c`` binary in the main pypy +directory. We suggest to use virtualenv with the resulting +pypy-c/pypy3-c as the interpreter; you can find more details about +various installation schemes here: http://doc.pypy.org/en/latest/install.html diff --git a/include/README b/include/README --- a/include/README +++ b/include/README @@ -1,7 +1,11 @@ This directory contains all the include files needed to build cpython extensions with PyPy. 
Note that these are just copies of the original headers -that are in pypy/module/cpyext/include: they are automatically copied from -there during translation. +that are in pypy/module/cpyext/{include,parse}: they are automatically copied +from there during translation. -Moreover, pypy_decl.h and pypy_macros.h are automatically generated, also -during translation. +Moreover, some pypy-specific files are automatically generated, also during +translation. Currently they are: +* pypy_decl.h +* pypy_macros.h +* pypy_numpy.h +* pypy_structmember_decl.h diff --git a/lib-python/2.7/ctypes/test/test_unaligned_structures.py b/lib-python/2.7/ctypes/test/test_unaligned_structures.py --- a/lib-python/2.7/ctypes/test/test_unaligned_structures.py +++ b/lib-python/2.7/ctypes/test/test_unaligned_structures.py @@ -37,7 +37,10 @@ for typ in byteswapped_structures: ## print >> sys.stderr, typ.value self.assertEqual(typ.value.offset, 1) - o = typ() + try: + o = typ() + except NotImplementedError as e: + self.skipTest(str(e)) # for PyPy o.value = 4 self.assertEqual(o.value, 4) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -61,12 +61,12 @@ def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} - g['CC'] = "gcc -pthread" - g['CXX'] = "g++ -pthread" + g['CC'] = "cc -pthread" + g['CXX'] = "c++ -pthread" g['OPT'] = "-DNDEBUG -O2" g['CFLAGS'] = "-DNDEBUG -O2" g['CCSHARED'] = "-fPIC" - g['LDSHARED'] = "gcc -pthread -shared" + g['LDSHARED'] = "cc -pthread -shared" g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] g['AR'] = "ar" g['ARFLAGS'] = "rc" diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -29,8 +29,8 @@ 'pypy': { 'stdlib': '{base}/lib-{implementation_lower}/{py_version_short}', 
'platstdlib': '{base}/lib-{implementation_lower}/{py_version_short}', - 'purelib': '{base}/lib-{implementation_lower}/{py_version_short}', - 'platlib': '{base}/lib-{implementation_lower}/{py_version_short}', + 'purelib': '{base}/site-packages', + 'platlib': '{base}/site-packages', 'include': '{base}/include', 'platinclude': '{base}/include', 'scripts': '{base}/bin', @@ -369,11 +369,8 @@ def _init_posix(vars): """Initialize the module as appropriate for POSIX systems.""" - # in cPython, _sysconfigdata is generated at build time, see _generate_posix_vars() - # in PyPy no such module exists - #from _sysconfigdata import build_time_vars - #vars.update(build_time_vars) - return + from _sysconfigdata import build_time_vars + vars.update(build_time_vars) def _init_non_posix(vars): """Initialize the module as appropriate for NT""" diff --git a/lib-python/2.7/warnings.py b/lib-python/2.7/warnings.py --- a/lib-python/2.7/warnings.py +++ b/lib-python/2.7/warnings.py @@ -309,9 +309,12 @@ def __init__(self, message, category, filename, lineno, file=None, line=None): - local_values = locals() - for attr in self._WARNING_DETAILS: - setattr(self, attr, local_values[attr]) + self.message = message + self.category = category + self.filename = filename + self.lineno = lineno + self.file = file + self.line = line self._category_name = category.__name__ if category else None def __str__(self): diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -36,9 +36,9 @@ except ImportError: def _delitem_if_value_is(d, key, value): try: - if self.data[key] is value: # fall-back: there is a potential + if d[key] is value: # fall-back: there is a potential # race condition in multithreaded programs HERE - del self.data[key] + del d[key] except KeyError: pass diff --git a/lib-python/2.7/zipfile.py b/lib-python/2.7/zipfile.py --- a/lib-python/2.7/zipfile.py +++ b/lib-python/2.7/zipfile.py @@ -622,19 +622,23 @@ """Read and 
return up to n bytes. If the argument is omitted, None, or negative, data is read and returned until EOF is reached.. """ - buf = '' + # PyPy modification: don't do repeated string concatenation + buf = [] + lenbuf = 0 if n is None: n = -1 while True: if n < 0: data = self.read1(n) - elif n > len(buf): - data = self.read1(n - len(buf)) + elif n > lenbuf: + data = self.read1(n - lenbuf) else: - return buf + break if len(data) == 0: - return buf - buf += data + break + lenbuf += len(data) + buf.append(data) + return "".join(buf) def _update_crc(self, newdata, eof): # Update the CRC using the given data. diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -76,17 +76,22 @@ return self._type_._alignmentofinstances() def _CData_output(self, resarray, base=None, index=-1): - # this seems to be a string if we're array of char, surprise! - from ctypes import c_char, c_wchar - if self._type_ is c_char: - return _rawffi.charp2string(resarray.buffer, self._length_) - if self._type_ is c_wchar: - return _rawffi.wcharp2unicode(resarray.buffer, self._length_) + from _rawffi.alt import types + # If a char_p or unichar_p is received, skip the string interpretation + if base._ffiargtype != types.Pointer(types.char_p) and \ + base._ffiargtype != types.Pointer(types.unichar_p): + # this seems to be a string if we're array of char, surprise! 
+ from ctypes import c_char, c_wchar + if self._type_ is c_char: + return _rawffi.charp2string(resarray.buffer, self._length_) + if self._type_ is c_wchar: + return _rawffi.wcharp2unicode(resarray.buffer, self._length_) res = self.__new__(self) ffiarray = self._ffiarray.fromaddress(resarray.buffer, self._length_) res._buffer = ffiarray - res._base = base - res._index = index + if base is not None: + res._base = base + res._index = index return res def _CData_retval(self, resbuffer): diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -64,8 +64,9 @@ res = object.__new__(self) res.__class__ = self res.__dict__['_buffer'] = resbuffer - res.__dict__['_base'] = base - res.__dict__['_index'] = index + if base is not None: + res.__dict__['_base'] = base + res.__dict__['_index'] = index return res def _CData_retval(self, resbuffer): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -1,4 +1,3 @@ - from _ctypes.basics import _CData, _CDataMeta, cdata_from_address from _ctypes.primitive import SimpleType, _SimpleCData from _ctypes.basics import ArgumentError, keepalive_key @@ -9,13 +8,16 @@ import sys import traceback -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f + +try: + from __pypy__ import builtinify +except ImportError: + builtinify = lambda f: f # XXX this file needs huge refactoring I fear -PARAMFLAG_FIN = 0x1 -PARAMFLAG_FOUT = 0x2 +PARAMFLAG_FIN = 0x1 +PARAMFLAG_FOUT = 0x2 PARAMFLAG_FLCID = 0x4 PARAMFLAG_COMBINED = PARAMFLAG_FIN | PARAMFLAG_FOUT | PARAMFLAG_FLCID @@ -24,9 +26,9 @@ PARAMFLAG_FIN, PARAMFLAG_FIN | PARAMFLAG_FOUT, PARAMFLAG_FIN | PARAMFLAG_FLCID - ) +) -WIN64 = sys.platform == 'win32' and sys.maxint == 2**63 - 1 +WIN64 = sys.platform == 'win32' and sys.maxint == 2 ** 63 - 1 def get_com_error(errcode, riid, pIunk): @@ -35,6 +37,7 @@ from 
_ctypes import COMError return COMError(errcode, None, None) + @builtinify def call_function(func, args): "Only for debugging so far: So that we can call CFunction instances" @@ -94,14 +97,9 @@ "item %d in _argtypes_ has no from_param method" % ( i + 1,)) self._argtypes_ = list(argtypes) - self._check_argtypes_for_fastpath() + argtypes = property(_getargtypes, _setargtypes) - def _check_argtypes_for_fastpath(self): - if all([hasattr(argtype, '_ffiargshape_') for argtype in self._argtypes_]): - fastpath_cls = make_fastpath_subclass(self.__class__) - fastpath_cls.enable_fastpath_maybe(self) - def _getparamflags(self): return self._paramflags @@ -126,27 +124,26 @@ raise TypeError( "paramflags must be a sequence of (int [,string [,value]]) " "tuples" - ) + ) if not isinstance(flag, int): raise TypeError( "paramflags must be a sequence of (int [,string [,value]]) " "tuples" - ) + ) _flag = flag & PARAMFLAG_COMBINED if _flag == PARAMFLAG_FOUT: typ = self._argtypes_[idx] if getattr(typ, '_ffiargshape_', None) not in ('P', 'z', 'Z'): raise TypeError( "'out' parameter %d must be a pointer type, not %s" - % (idx+1, type(typ).__name__) - ) + % (idx + 1, type(typ).__name__) + ) elif _flag not in VALID_PARAMFLAGS: raise TypeError("paramflag value %d not supported" % flag) self._paramflags = paramflags paramflags = property(_getparamflags, _setparamflags) - def _getrestype(self): return self._restype_ @@ -156,7 +153,7 @@ from ctypes import c_int restype = c_int if not (isinstance(restype, _CDataMeta) or restype is None or - callable(restype)): + callable(restype)): raise TypeError("restype must be a type, a callable, or None") self._restype_ = restype @@ -168,15 +165,18 @@ def _geterrcheck(self): return getattr(self, '_errcheck_', None) + def _seterrcheck(self, errcheck): if not callable(errcheck): raise TypeError("The errcheck attribute must be callable") self._errcheck_ = errcheck + def _delerrcheck(self): try: del self._errcheck_ except AttributeError: pass + errcheck = 
property(_geterrcheck, _seterrcheck, _delerrcheck) def _ffishapes(self, args, restype): @@ -188,7 +188,7 @@ raise TypeError("invalid result type for callback function") restype = restype._ffiargshape_ else: - restype = 'O' # void + restype = 'O' # void return argtypes, restype def _set_address(self, address): @@ -201,7 +201,7 @@ def __init__(self, *args): self.name = None - self._objects = {keepalive_key(0):self} + self._objects = {keepalive_key(0): self} self._needs_free = True # Empty function object -- this is needed for casts @@ -222,10 +222,8 @@ if self._argtypes_ is None: self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) - self._check_argtypes_for_fastpath() return - # A callback into python if callable(argument) and not argsl: self.callable = argument @@ -259,7 +257,7 @@ if (sys.platform == 'win32' and isinstance(argument, (int, long)) and argsl): ffiargs, ffires = self._ffishapes(self._argtypes_, self._restype_) - self._com_index = argument + 0x1000 + self._com_index = argument + 0x1000 self.name = argsl.pop(0) if argsl: self.paramflags = argsl.pop(0) @@ -281,6 +279,7 @@ except SystemExit as e: handle_system_exit(e) raise + return f def __call__(self, *args, **kwargs): @@ -317,7 +316,7 @@ except: exc_info = sys.exc_info() traceback.print_tb(exc_info[2], file=sys.stderr) - print >>sys.stderr, "%s: %s" % (exc_info[0].__name__, exc_info[1]) + print >> sys.stderr, "%s: %s" % (exc_info[0].__name__, exc_info[1]) return 0 if self._restype_ is not None: return res @@ -328,7 +327,7 @@ # really slow". Now we don't worry that much about slowness # of ctypes, and it's strange to get warnings for perfectly- # legal code. 
- #warnings.warn('C function without declared arguments called', + # warnings.warn('C function without declared arguments called', # RuntimeWarning, stacklevel=2) argtypes = [] @@ -337,7 +336,7 @@ if not args: raise ValueError( "native COM method call without 'this' parameter" - ) + ) thisvalue = args[0] thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( @@ -366,7 +365,6 @@ return tuple(outargs) def _call_funcptr(self, funcptr, *newargs): - if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO: tmp = _rawffi.get_errno() _rawffi.set_errno(get_errno()) @@ -431,7 +429,7 @@ ffiargs = [argtype.get_ffi_argtype() for argtype in argtypes] ffires = restype.get_ffi_argtype() return _ffi.FuncPtr.fromaddr(ptr, '', ffiargs, ffires, self._flags_) - + cdll = self.dll._handle try: ffi_argtypes = [argtype.get_ffi_argtype() for argtype in argtypes] @@ -450,7 +448,7 @@ # funcname -> _funcname@ # where n is 0, 4, 8, 12, ..., 128 for i in range(33): - mangled_name = "_%s@%d" % (self.name, i*4) + mangled_name = "_%s@%d" % (self.name, i * 4) try: return cdll.getfunc(mangled_name, ffi_argtypes, ffi_restype, @@ -492,7 +490,7 @@ for argtype, arg in zip(argtypes, args): param = argtype.from_param(arg) _type_ = getattr(argtype, '_type_', None) - if _type_ == 'P': # special-case for c_void_p + if _type_ == 'P': # special-case for c_void_p param = param._get_buffer_value() elif self._is_primitive(argtype): param = param.value @@ -668,69 +666,11 @@ self._needs_free = False -def make_fastpath_subclass(CFuncPtr): - if CFuncPtr._is_fastpath: - return CFuncPtr - # - try: - return make_fastpath_subclass.memo[CFuncPtr] - except KeyError: - pass - - class CFuncPtrFast(CFuncPtr): - - _is_fastpath = True - _slowpath_allowed = True # set to False by tests - - @classmethod - def enable_fastpath_maybe(cls, obj): - if (obj.callable is None and - obj._com_index is None): - obj.__class__ = cls - - def __rollback(self): - assert self._slowpath_allowed - 
self.__class__ = CFuncPtr - - # disable the fast path if we reset argtypes - def _setargtypes(self, argtypes): - self.__rollback() - self._setargtypes(argtypes) - argtypes = property(CFuncPtr._getargtypes, _setargtypes) - - def _setcallable(self, func): - self.__rollback() - self.callable = func - callable = property(lambda x: None, _setcallable) - - def _setcom_index(self, idx): - self.__rollback() - self._com_index = idx - _com_index = property(lambda x: None, _setcom_index) - - def __call__(self, *args): - thisarg = None - argtypes = self._argtypes_ - restype = self._restype_ - funcptr = self._getfuncptr(argtypes, restype, thisarg) - try: - result = self._call_funcptr(funcptr, *args) - result, _ = self._do_errcheck(result, args) - except (TypeError, ArgumentError, UnicodeDecodeError): - assert self._slowpath_allowed - return CFuncPtr.__call__(self, *args) - return result - - make_fastpath_subclass.memo[CFuncPtr] = CFuncPtrFast - return CFuncPtrFast -make_fastpath_subclass.memo = {} - - def handle_system_exit(e): # issue #1194: if we get SystemExit here, then exit the interpreter. # Highly obscure imho but some people seem to depend on it. if sys.flags.inspect: - return # Don't exit if -i flag was given. + return # Don't exit if -i flag was given. 
else: code = e.code if isinstance(code, int): diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -234,6 +234,9 @@ if ('_abstract_' in cls.__dict__ or cls is Structure or cls is union.Union): raise TypeError("abstract class") + if hasattr(cls, '_swappedbytes_'): + raise NotImplementedError("missing in PyPy: structure/union with " + "swapped (non-native) byte ordering") if hasattr(cls, '_ffistruct_'): self.__dict__['_buffer'] = self._ffistruct_(autofree=True) return self diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -8,6 +8,9 @@ from _curses_cffi import ffi, lib +version = b"2.2" +__version__ = b"2.2" + def _copy_to_globals(name): globals()[name] = getattr(lib, name) @@ -60,10 +63,6 @@ _setup() -# Do we want this? -# version = "2.2" -# __version__ = "2.2" - # ____________________________________________________________ @@ -913,101 +912,29 @@ return None -# XXX: Do something about the following? 
-# /* Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES -# * and _curses.COLS */ -# #if defined(HAVE_CURSES_RESIZETERM) || defined(HAVE_CURSES_RESIZE_TERM) -# static int -# update_lines_cols(void) -# { -# PyObject *o; -# PyObject *m = PyImport_ImportModuleNoBlock("curses"); +# Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES +# and _curses.COLS +def update_lines_cols(): + globals()["LINES"] = lib.LINES + globals()["COLS"] = lib.COLS + try: + m = sys.modules["curses"] + m.LINES = lib.LINES + m.COLS = lib.COLS + except (KeyError, AttributeError): + pass -# if (!m) -# return 0; -# o = PyInt_FromLong(LINES); -# if (!o) { -# Py_DECREF(m); -# return 0; -# } -# if (PyObject_SetAttrString(m, "LINES", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# if (PyDict_SetItemString(ModDict, "LINES", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# Py_DECREF(o); -# o = PyInt_FromLong(COLS); -# if (!o) { -# Py_DECREF(m); -# return 0; -# } -# if (PyObject_SetAttrString(m, "COLS", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# if (PyDict_SetItemString(ModDict, "COLS", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# Py_DECREF(o); -# Py_DECREF(m); -# return 1; -# } -# #endif +def resizeterm(lines, columns): + _ensure_initialised() + _check_ERR(lib.resizeterm(lines, columns), "resizeterm") + update_lines_cols() -# #ifdef HAVE_CURSES_RESIZETERM -# static PyObject * -# PyCurses_ResizeTerm(PyObject *self, PyObject *args) -# { -# int lines; -# int columns; -# PyObject *result; -# PyCursesInitialised; - -# if (!PyArg_ParseTuple(args,"ii:resizeterm", &lines, &columns)) -# return NULL; - -# result = PyCursesCheckERR(resizeterm(lines, columns), "resizeterm"); -# if (!result) -# return NULL; -# if (!update_lines_cols()) -# return NULL; -# return result; -# } - -# #endif - -# #ifdef HAVE_CURSES_RESIZE_TERM -# static PyObject * -# PyCurses_Resize_Term(PyObject *self, PyObject *args) -# { -# int 
lines; -# int columns; - -# PyObject *result; - -# PyCursesInitialised; - -# if (!PyArg_ParseTuple(args,"ii:resize_term", &lines, &columns)) -# return NULL; - -# result = PyCursesCheckERR(resize_term(lines, columns), "resize_term"); -# if (!result) -# return NULL; -# if (!update_lines_cols()) -# return NULL; -# return result; -# } -# #endif /* HAVE_CURSES_RESIZE_TERM */ +def resize_term(lines, columns): + _ensure_initialised() + _check_ERR(lib.resize_term(lines, columns), "resize_term") + update_lines_cols() def setsyx(y, x): diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py --- a/lib_pypy/_curses_build.py +++ b/lib_pypy/_curses_build.py @@ -87,6 +87,13 @@ static const chtype A_CHARTEXT; static const chtype A_COLOR; +static const chtype A_HORIZONTAL; +static const chtype A_LEFT; +static const chtype A_LOW; +static const chtype A_RIGHT; +static const chtype A_TOP; +static const chtype A_VERTICAL; + static const int BUTTON1_RELEASED; static const int BUTTON1_PRESSED; static const int BUTTON1_CLICKED; @@ -202,6 +209,8 @@ int resetty(void); int reset_prog_mode(void); int reset_shell_mode(void); +int resizeterm(int, int); +int resize_term(int, int); int savetty(void); int scroll(WINDOW *); int scrollok(WINDOW *, bool); diff --git a/lib_pypy/_pypy_winbase_build.py b/lib_pypy/_pypy_winbase_build.py --- a/lib_pypy/_pypy_winbase_build.py +++ b/lib_pypy/_pypy_winbase_build.py @@ -79,10 +79,20 @@ BOOL WINAPI CreateProcessA(char *, char *, void *, void *, BOOL, DWORD, char *, char *, LPSTARTUPINFO, LPPROCESS_INFORMATION); +BOOL WINAPI CreateProcessW(wchar_t *, wchar_t *, void *, + void *, BOOL, DWORD, wchar_t *, + wchar_t *, LPSTARTUPINFO, LPPROCESS_INFORMATION); DWORD WINAPI WaitForSingleObject(HANDLE, DWORD); BOOL WINAPI GetExitCodeProcess(HANDLE, LPDWORD); BOOL WINAPI TerminateProcess(HANDLE, UINT); HANDLE WINAPI GetStdHandle(DWORD); +DWORD WINAPI GetModuleFileNameW(HANDLE, wchar_t *, DWORD); + +UINT WINAPI SetErrorMode(UINT); +#define 
SEM_FAILCRITICALERRORS 0x0001 +#define SEM_NOGPFAULTERRORBOX 0x0002 +#define SEM_NOALIGNMENTFAULTEXCEPT 0x0004 +#define SEM_NOOPENFILEERRORBOX 0x8000 """) # -------------------- diff --git a/lib_pypy/_pypy_winbase_cffi.py b/lib_pypy/_pypy_winbase_cffi.py --- a/lib_pypy/_pypy_winbase_cffi.py +++ b/lib_pypy/_pypy_winbase_cffi.py @@ -3,8 +3,8 @@ ffi = _cffi_backend.FFI('_pypy_winbase_cffi', _version = 0x2601, - _types = b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x50\x03\x00\x00\x13\x11\x00\x00\x53\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x4F\x03\x00\x00\x4E\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x42\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x52\x03\x00\x00\x04\x01\x00\x00\x00\x01', - _globals = 
(b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x4C\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x49\x23GetStdHandle',0,b'\x00\x00\x3F\x23GetVersion',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x3B\x23WaitForSingleObject',0,b'\x00\x00\x38\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x44\x23_getwch',0,b'\x00\x00\x44\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x46\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x41\x23_ungetwch',0), - _struct_unions = ((b'\x00\x00\x00\x4E\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x4F\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x42\x11wShowWindow',b'\x00\x00\x42\x11cbReserved2',b'\x00\x00\x51\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')), - _typenames = (b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x4EPROCESS_INFORMATION',b'\x00\x00\x00\x4FSTARTUPINFO',b'\x00\x00\x00\x42wint_t'), + _types = 
b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x64\x03\x00\x00\x13\x11\x00\x00\x67\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x63\x03\x00\x00\x62\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x5B\x03\x00\x00\x39\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x39\x11\x00\x00\x39\x11\x00\x00\x1B\x11\x00\x00\x1C\x11\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x29\x0D\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x39\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x56\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x56\x0D\x00\x00\x00\x0F\x00\x00\x56\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x66\x03\x00\x00\x04\x01\x00\x00\x00\x01', + _globals = 
(b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x38\x23CreateProcessW',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x60\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x4E\x23GetModuleFileNameW',0,b'\x00\x00\x5D\x23GetStdHandle',0,b'\x00\x00\x53\x23GetVersion',0,b'\xFF\xFF\xFF\x1FSEM_FAILCRITICALERRORS',1,b'\xFF\xFF\xFF\x1FSEM_NOALIGNMENTFAULTEXCEPT',4,b'\xFF\xFF\xFF\x1FSEM_NOGPFAULTERRORBOX',2,b'\xFF\xFF\xFF\x1FSEM_NOOPENFILEERRORBOX',32768,b'\x00\x00\x47\x23SetErrorMode',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x4A\x23WaitForSingleObject',0,b'\x00\x00\x44\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x58\x23_getwch',0,b'\x00\x00\x58\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x5A\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x55\x23_ungetwch',0), + _struct_unions = ((b'\x00\x00\x00\x62\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x63\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x56\x11wShowWindow',b'\x00\x00\x56\x11cbReserved2',b'\x00\x00\x65\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')), + _typenames = 
(b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x62PROCESS_INFORMATION',b'\x00\x00\x00\x63STARTUPINFO',b'\x00\x00\x00\x56wint_t'), ) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -31,10 +31,11 @@ import weakref from threading import _get_ident as _thread_get_ident try: - from __pypy__ import newlist_hint + from __pypy__ import newlist_hint, add_memory_pressure except ImportError: assert '__pypy__' not in sys.builtin_module_names newlist_hint = lambda sizehint: [] + add_memory_pressure = lambda size: None if sys.version_info[0] >= 3: StandardError = Exception @@ -150,6 +151,9 @@ def connect(database, timeout=5.0, detect_types=0, isolation_level="", check_same_thread=True, factory=None, cached_statements=100): factory = Connection if not factory else factory + # an sqlite3 db seems to be around 100 KiB at least (doesn't matter if + # backed by :memory: or a file) + add_memory_pressure(100 * 1024) return factory(database, timeout, detect_types, isolation_level, check_same_thread, factory, cached_statements) diff --git a/lib_pypy/_sysconfigdata.py b/lib_pypy/_sysconfigdata.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_sysconfigdata.py @@ -0,0 +1,5 @@ +import imp + +build_time_vars = { + "SO": [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] +} diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -431,7 +431,14 @@ self.append(obj) def find_class(self, module, name): - # Subclasses may override this + if self.find_global is None: + raise UnpicklingError( + "Global and instance pickles are not supported.") + return self.find_global(module, name) + + def find_global(self, module, name): + # This can officially be patched directly in the Unpickler + # instance, according to the docs __import__(module) mod = sys.modules[module] klass = getattr(mod, name) diff --git 
a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.10.0 +Version: 1.11.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI from .error import CDefError, FFIError, VerificationError, VerificationMissing -__version__ = "1.10.0" -__version_info__ = (1, 10, 0) +__version__ = "1.11.0" +__version_info__ = (1, 11, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_errors.h b/lib_pypy/cffi/_cffi_errors.h new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/_cffi_errors.h @@ -0,0 +1,145 @@ +#ifndef CFFI_MESSAGEBOX +# ifdef _MSC_VER +# define CFFI_MESSAGEBOX 1 +# else +# define CFFI_MESSAGEBOX 0 +# endif +#endif + + +#if CFFI_MESSAGEBOX +/* Windows only: logic to take the Python-CFFI embedding logic + initialization errors and display them in a background thread + with MessageBox. The idea is that if the whole program closes + as a result of this problem, then likely it is already a console + program and you can read the stderr output in the console too. + If it is not a console program, then it will likely show its own + dialog to complain, or generally not abruptly close, and for this + case the background thread should stay alive. 
+*/ +static void *volatile _cffi_bootstrap_text; + +static PyObject *_cffi_start_error_capture(void) +{ + PyObject *result = NULL; + PyObject *x, *m, *bi; + + if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text, + (void *)1, NULL) != NULL) + return (PyObject *)1; + + m = PyImport_AddModule("_cffi_error_capture"); + if (m == NULL) + goto error; + + result = PyModule_GetDict(m); + if (result == NULL) + goto error; + +#if PY_MAJOR_VERSION >= 3 + bi = PyImport_ImportModule("builtins"); +#else + bi = PyImport_ImportModule("__builtin__"); +#endif + if (bi == NULL) + goto error; + PyDict_SetItemString(result, "__builtins__", bi); + Py_DECREF(bi); + + x = PyRun_String( + "import sys\n" + "class FileLike:\n" + " def write(self, x):\n" + " of.write(x)\n" + " self.buf += x\n" + "fl = FileLike()\n" + "fl.buf = ''\n" + "of = sys.stderr\n" + "sys.stderr = fl\n" + "def done():\n" + " sys.stderr = of\n" + " return fl.buf\n", /* make sure the returned value stays alive */ + Py_file_input, + result, result); + Py_XDECREF(x); + + error: + if (PyErr_Occurred()) + { + PyErr_WriteUnraisable(Py_None); + PyErr_Clear(); + } + return result; +} + +#pragma comment(lib, "user32.lib") + +static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored) +{ + Sleep(666); /* may be interrupted if the whole process is closing */ +#if PY_MAJOR_VERSION >= 3 + MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text, + L"Python-CFFI error", + MB_OK | MB_ICONERROR); +#else + MessageBoxA(NULL, (char *)_cffi_bootstrap_text, + "Python-CFFI error", + MB_OK | MB_ICONERROR); +#endif + _cffi_bootstrap_text = NULL; + return 0; +} + +static void _cffi_stop_error_capture(PyObject *ecap) +{ + PyObject *s; + void *text; + + if (ecap == (PyObject *)1) + return; + + if (ecap == NULL) + goto error; + + s = PyRun_String("done()", Py_eval_input, ecap, ecap); + if (s == NULL) + goto error; + + /* Show a dialog box, but in a background thread, and + never show multiple dialog boxes at once. 
*/ +#if PY_MAJOR_VERSION >= 3 + text = PyUnicode_AsWideCharString(s, NULL); +#else + text = PyString_AsString(s); +#endif + + _cffi_bootstrap_text = text; + + if (text != NULL) + { + HANDLE h; + h = CreateThread(NULL, 0, _cffi_bootstrap_dialog, + NULL, 0, NULL); + if (h != NULL) + CloseHandle(h); + } + /* decref the string, but it should stay alive as 'fl.buf' + in the small module above. It will really be freed only if + we later get another similar error. So it's a leak of at + most one copy of the small module. That's fine for this + situation which is usually a "fatal error" anyway. */ + Py_DECREF(s); + PyErr_Clear(); + return; + + error: + _cffi_bootstrap_text = NULL; + PyErr_Clear(); +} + +#else + +static PyObject *_cffi_start_error_capture(void) { return NULL; } +static void _cffi_stop_error_capture(PyObject *ecap) { } + +#endif diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -8,7 +8,7 @@ the same works for the other two macros. Py_DEBUG implies them, but not the other way around. 
*/ -#ifndef _CFFI_USE_EMBEDDING +#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API) # include # if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) # define Py_LIMITED_API @@ -159,9 +159,9 @@ #define _cffi_from_c_struct \ ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18]) #define _cffi_to_c_wchar_t \ - ((wchar_t(*)(PyObject *))_cffi_exports[19]) + ((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19]) #define _cffi_from_c_wchar_t \ - ((PyObject *(*)(wchar_t))_cffi_exports[20]) + ((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20]) #define _cffi_to_c_long_double \ ((long double(*)(PyObject *))_cffi_exports[21]) #define _cffi_to_c__Bool \ @@ -174,7 +174,11 @@ #define _CFFI_CPIDX 25 #define _cffi_call_python \ ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) -#define _CFFI_NUM_EXPORTS 26 +#define _cffi_to_c_wchar3216_t \ + ((int(*)(PyObject *))_cffi_exports[26]) +#define _cffi_from_c_wchar3216_t \ + ((PyObject *(*)(int))_cffi_exports[27]) +#define _CFFI_NUM_EXPORTS 28 struct _cffi_ctypedescr; @@ -215,6 +219,46 @@ return NULL; } + +#ifdef HAVE_WCHAR_H +typedef wchar_t _cffi_wchar_t; +#else +typedef uint16_t _cffi_wchar_t; /* same random pick as _cffi_backend.c */ +#endif + +_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 2) + return (uint16_t)_cffi_to_c_wchar_t(o); + else + return (uint16_t)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x) +{ + if (sizeof(_cffi_wchar_t) == 2) + return _cffi_from_c_wchar_t(x); + else + return _cffi_from_c_wchar3216_t(x); +} + +_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 4) + return (int)_cffi_to_c_wchar_t(o); + else + return (int)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(int x) +{ + if (sizeof(_cffi_wchar_t) == 4) + return _cffi_from_c_wchar_t(x); + else + return 
_cffi_from_c_wchar3216_t(x); +} + + /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -109,6 +109,8 @@ /********** CPython-specific section **********/ #ifndef PYPY_VERSION +#include "_cffi_errors.h" + #define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] @@ -220,8 +222,16 @@ /* Print as much information as potentially useful. Debugging load-time failures with embedding is not fun */ + PyObject *ecap; PyObject *exception, *v, *tb, *f, *modules, *mod; PyErr_Fetch(&exception, &v, &tb); + ecap = _cffi_start_error_capture(); + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString( + "Failed to initialize the Python-CFFI embedding logic:\n\n", f); + } + if (exception != NULL) { PyErr_NormalizeException(&exception, &v, &tb); PyErr_Display(exception, v, tb); @@ -230,10 +240,9 @@ Py_XDECREF(v); Py_XDECREF(tb); - f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.10.0" + "\ncompiled with cffi version: 1.11.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); @@ -249,6 +258,7 @@ PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); PyFile_WriteString("\n\n", f); } + _cffi_stop_error_capture(ecap); } result = -1; goto done; diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -75,9 +75,10 @@ self._init_once_cache = {} self._cdef_version = None self._embedding = None + self._typecache = model.get_typecache(backend) if hasattr(backend, 'set_ffi'): backend.set_ffi(self) - for name in backend.__dict__: + for name in list(backend.__dict__): if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # @@ -570,7 +571,10 @@ # we 
need 'libpypy-c.{so,dylib}', which should be by # default located in 'sys.prefix/bin' for installed # systems. - pythonlib = "pypy-c" + if sys.version_info < (3,): + pythonlib = "pypy-c" + else: + pythonlib = "pypy3-c" if hasattr(sys, 'prefix'): ensure('library_dirs', os.path.join(sys.prefix, 'bin')) # On uninstalled pypy's, the libpypy-c is typically found in @@ -756,21 +760,30 @@ def _load_backend_lib(backend, name, flags): + import os if name is None: if sys.platform != "win32": return backend.load_library(None, flags) name = "c" # Windows: load_library(None) fails, but this works - # (backward compatibility hack only) - try: - if '.' not in name and '/' not in name: - raise OSError("library not found: %r" % (name,)) - return backend.load_library(name, flags) - except OSError: - import ctypes.util - path = ctypes.util.find_library(name) - if path is None: - raise # propagate the original OSError - return backend.load_library(path, flags) + # on Python 2 (backward compatibility hack only) + first_error = None + if '.' in name or '/' in name or os.sep in name: + try: + return backend.load_library(name, flags) + except OSError as e: + first_error = e + import ctypes.util + path = ctypes.util.find_library(name) + if path is None: + if name == "c" and sys.platform == "win32" and sys.version_info >= (3,): + raise OSError("dlopen(None) cannot work on Windows for Python 3 " + "(see http://bugs.python.org/issue23606)") + msg = ("ctypes.util.find_library() did not manage " + "to locate a library called %r" % (name,)) + if first_error is not None: + msg = "%s. 
Additionally, %s" % (first_error, msg) + raise OSError(msg) + return backend.load_library(path, flags) def _make_ffi_library(ffi, libname, flags): backend = ffi._backend diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py --- a/lib_pypy/cffi/cffi_opcode.py +++ b/lib_pypy/cffi/cffi_opcode.py @@ -105,8 +105,12 @@ PRIM_UINT_FAST64 = 45 PRIM_INTMAX = 46 PRIM_UINTMAX = 47 +PRIM_FLOATCOMPLEX = 48 +PRIM_DOUBLECOMPLEX = 49 +PRIM_CHAR16 = 50 +PRIM_CHAR32 = 51 -_NUM_PRIM = 48 +_NUM_PRIM = 52 _UNKNOWN_PRIM = -1 _UNKNOWN_FLOAT_PRIM = -2 _UNKNOWN_LONG_DOUBLE = -3 @@ -128,8 +132,12 @@ 'float': PRIM_FLOAT, 'double': PRIM_DOUBLE, 'long double': PRIM_LONGDOUBLE, + 'float _Complex': PRIM_FLOATCOMPLEX, + 'double _Complex': PRIM_DOUBLECOMPLEX, '_Bool': PRIM_BOOL, 'wchar_t': PRIM_WCHAR, + 'char16_t': PRIM_CHAR16, + 'char32_t': PRIM_CHAR32, 'int8_t': PRIM_INT8, 'uint8_t': PRIM_UINT8, 'int16_t': PRIM_INT16, diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -16,6 +16,7 @@ except ImportError: lock = None +CDEF_SOURCE_STRING = "" _r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", re.DOTALL | re.MULTILINE) _r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)" @@ -34,6 +35,9 @@ r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") +_r_int_dotdotdot = re.compile(r"(\b(int|long|short|signed|unsigned|char)\s*)+" + r"\.\.\.") +_r_float_dotdotdot = re.compile(r"\b(double|float)\s*\.\.\.") def _get_parser(): global _parser_cache @@ -180,6 +184,10 @@ assert csource[p:p+3] == '...' csource = '%s __dotdotdot%d__ %s' % (csource[:p], number, csource[p+3:]) + # Replace "int ..." or "unsigned long int..." with "__dotdotdotint__" + csource = _r_int_dotdotdot.sub(' __dotdotdotint__ ', csource) + # Replace "float ..." or "double..." 
with "__dotdotdotfloat__" + csource = _r_float_dotdotdot.sub(' __dotdotdotfloat__ ', csource) # Replace all remaining "..." with the same name, "__dotdotdot__", # which is declared with a typedef for the purpose of C parsing. return csource.replace('...', ' __dotdotdot__ '), macros @@ -251,14 +259,21 @@ ctn.discard(name) typenames += sorted(ctn) # - csourcelines = ['typedef int %s;' % typename for typename in typenames] - csourcelines.append('typedef int __dotdotdot__;') + csourcelines = [] + csourcelines.append('# 1 ""') + for typename in typenames: + csourcelines.append('typedef int %s;' % typename) + csourcelines.append('typedef int __dotdotdotint__, __dotdotdotfloat__,' + ' __dotdotdot__;') + # this forces pycparser to consider the following in the file + # called from line 1 + csourcelines.append('# 1 "%s"' % (CDEF_SOURCE_STRING,)) csourcelines.append(csource) - csource = '\n'.join(csourcelines) + fullcsource = '\n'.join(csourcelines) if lock is not None: lock.acquire() # pycparser is not thread-safe... try: - ast = _get_parser().parse(csource) + ast = _get_parser().parse(fullcsource) except pycparser.c_parser.ParseError as e: self.convert_pycparser_error(e, csource) finally: @@ -268,17 +283,17 @@ return ast, macros, csource def _convert_pycparser_error(self, e, csource): - # xxx look for ":NUM:" at the start of str(e) and try to interpret - # it as a line number + # xxx look for ":NUM:" at the start of str(e) + # and interpret that as a line number. This will not work if + # the user gives explicit ``# NUM "FILE"`` directives. 
line = None msg = str(e) - if msg.startswith(':') and ':' in msg[1:]: - linenum = msg[1:msg.find(':',1)] - if linenum.isdigit(): - linenum = int(linenum, 10) - csourcelines = csource.splitlines() - if 1 <= linenum <= len(csourcelines): - line = csourcelines[linenum-1] + match = re.match(r"%s:(\d+):" % (CDEF_SOURCE_STRING,), msg) + if match: + linenum = int(match.group(1), 10) + csourcelines = csource.splitlines() + if 1 <= linenum <= len(csourcelines): + line = csourcelines[linenum-1] return line def convert_pycparser_error(self, e, csource): @@ -311,10 +326,14 @@ for decl in iterator: if decl.name == '__dotdotdot__': break + else: + assert 0 + current_decl = None # try: self._inside_extern_python = '__cffi_extern_python_stop' for decl in iterator: + current_decl = decl if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) elif isinstance(decl, pycparser.c_ast.Typedef): @@ -322,15 +341,15 @@ raise CDefError("typedef does not declare any name", decl) quals = 0 - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names[-1] == '__dotdotdot__'): + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and + decl.type.type.names[-1].startswith('__dotdotdot')): realtype = self._get_unknown_type(decl) elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and isinstance(decl.type.type.type, pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + decl.type.type.type.names[-1].startswith('__dotdotdot')): + realtype = self._get_unknown_ptr_type(decl) else: realtype, quals = self._get_type_and_quals( decl.type, name=decl.name, partial_length_ok=True) @@ -338,7 +357,13 @@ elif decl.__class__.__name__ == 'Pragma': pass # skip pragma, only in pycparser 2.15 else: - raise CDefError("unrecognized construct", decl) + raise CDefError("unexpected <%s>: this construct is valid " + "C but not valid in 
cdef()" % + decl.__class__.__name__, decl) + except CDefError as e: + if len(e.args) == 1: + e.args = e.args + (current_decl,) + raise except FFIError as e: msg = self._convert_pycparser_error(e, csource) if msg: @@ -793,6 +818,16 @@ "the actual array length in this context" % exprnode.coord.line) # + if (isinstance(exprnode, pycparser.c_ast.BinaryOp) and + exprnode.op == '+'): + return (self._parse_constant(exprnode.left) + + self._parse_constant(exprnode.right)) + # + if (isinstance(exprnode, pycparser.c_ast.BinaryOp) and + exprnode.op == '-'): + return (self._parse_constant(exprnode.left) - + self._parse_constant(exprnode.right)) + # raise FFIError(":%d: unsupported expression: expected a " "simple numeric constant" % exprnode.coord.line) @@ -832,24 +867,25 @@ def _get_unknown_type(self, decl): typenames = decl.type.type.names - assert typenames[-1] == '__dotdotdot__' - if len(typenames) == 1: + if typenames == ['__dotdotdot__']: return model.unknown_type(decl.name) - if (typenames[:-1] == ['float'] or - typenames[:-1] == ['double']): - # not for 'long double' so far - result = model.UnknownFloatType(decl.name) - else: - for t in typenames[:-1]: - if t not in ['int', 'short', 'long', 'signed', - 'unsigned', 'char']: - raise FFIError(':%d: bad usage of "..."' % - decl.coord.line) - result = model.UnknownIntegerType(decl.name) + if typenames == ['__dotdotdotint__']: + if self._uses_new_feature is None: + self._uses_new_feature = "'typedef int... %s'" % decl.name + return model.UnknownIntegerType(decl.name) - if self._uses_new_feature is None: - self._uses_new_feature = "'typedef %s... %s'" % ( - ' '.join(typenames[:-1]), decl.name) + if typenames == ['__dotdotdotfloat__']: + # note: not for 'long double' so far + if self._uses_new_feature is None: + self._uses_new_feature = "'typedef float... %s'" % decl.name + return model.UnknownFloatType(decl.name) - return result + raise FFIError(':%d: unsupported usage of "..." 
in typedef' + % decl.coord.line) + + def _get_unknown_ptr_type(self, decl): + if decl.type.type.type.names == ['__dotdotdot__']: + return model.unknown_ptr_type(decl.name) + raise FFIError(':%d: unsupported usage of "..." in typedef' + % decl.coord.line) diff --git a/lib_pypy/cffi/error.py b/lib_pypy/cffi/error.py --- a/lib_pypy/cffi/error.py +++ b/lib_pypy/cffi/error.py @@ -5,10 +5,13 @@ class CDefError(Exception): def __str__(self): try: - line = 'line %d: ' % (self.args[1].coord.line,) + current_decl = self.args[1] + filename = current_decl.coord.file + linenum = current_decl.coord.line + prefix = '%s:%d: ' % (filename, linenum) except (AttributeError, TypeError, IndexError): - line = '' - return '%s%s' % (line, self.args[0]) + prefix = '' + return '%s%s' % (prefix, self.args[0]) class VerificationError(Exception): """ An error raised when verification fails diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -6,6 +6,7 @@ 'extra_objects', 'depends'] def get_extension(srcfilename, modname, sources=(), **kwds): + _hack_at_distutils() from distutils.core import Extension allsources = [srcfilename] for src in sources: @@ -15,6 +16,7 @@ def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" + _hack_at_distutils() saved_environ = os.environ.copy() try: outputfilename = _build(tmpdir, ext, compiler_verbose, debug) @@ -113,3 +115,13 @@ f = cStringIO.StringIO() _flatten(x, f) return f.getvalue() + +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) + if sys.platform == "win32": From pypy.commits at gmail.com Tue Jul 11 10:19:46 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 07:19:46 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: (arigo, fijal) fix the merge 
Message-ID: <5964de82.8292df0a.36c27.07c8@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91860:fe57861ced6a Date: 2017-07-11 15:18 +0200 http://bitbucket.org/pypy/pypy/changeset/fe57861ced6a/ Log: (arigo, fijal) fix the merge diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -174,7 +174,7 @@ self.thread_setup = thread_setup self.thread_run_ptr = getfn(thread_run, [], annmodel.s_None, - inline=True, minimal_transform=False) + minimal_transform=False) self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None, minimal_transform=False) @@ -182,6 +182,16 @@ from rpython.rlib import _stacklet_shadowstack _stacklet_shadowstack.complete_destrptr(gctransformer) + def postprocess_graph(self, gct, graph, any_inlining): + from rpython.memory.gctransform import shadowcolor + if any_inlining: + shadowcolor.postprocess_inlining(graph) + use_push_pop = shadowcolor.postprocess_graph(graph, gct.c_const_gcdata) + if use_push_pop and graph in gct.graphs_to_inline: + log.WARNING("%r is marked for later inlining, " + "but is using push/pop roots. 
Disabled" % (graph,)) + del gct.graphs_to_inline[graph] + class ShadowStackRootWalker(BaseRootWalker): def __init__(self, gctransformer): diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -446,7 +446,7 @@ def enter_roots_frame(self, funcgen, (c_gcdata, c_numcolors)): numcolors = c_numcolors.value # XXX hard-code the field name here - gcpol_ss = '%s->gcd_inst_root_stack_top' % funcgen.expr(c_gcdata) + gcpol_ss = 'pypy_threadlocal.shadowstack_top' # yield ('typedef struct { void %s; } pypy_ss_t;' % ', '.join(['*s%d' % i for i in range(numcolors)])) From pypy.commits at gmail.com Tue Jul 11 10:19:48 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 07:19:48 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: make sure we allocate shadow stack Message-ID: <5964de84.01571c0a.65eba.f717@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91861:17d925b4fab8 Date: 2017-07-11 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/17d925b4fab8/ Log: make sure we allocate shadow stack diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -152,6 +152,13 @@ if tl_shadowstack.get_or_make_raw() == llmemory.NULL: allocate_shadow_stack() + + def thread_start(): + allocate_shadow_stack() + tl_synclock.get_or_make_raw() # reference the field at least once + + thread_start._always_inline_ = True + def allocate_shadow_stack(): root_stack_depth = 163840 root_stack_size = sizeofaddr * root_stack_depth @@ -175,6 +182,8 @@ self.thread_setup = thread_setup self.thread_run_ptr = getfn(thread_run, [], annmodel.s_None, minimal_transform=False) + self.thread_start_ptr = getfn(thread_start, [], annmodel.s_None, + minimal_transform=False) self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None, minimal_transform=False) From 
pypy.commits at gmail.com Tue Jul 11 11:03:00 2017 From: pypy.commits at gmail.com (fijal) Date: Tue, 11 Jul 2017 08:03:00 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: (fijal, arigo) fix walking stack roots Message-ID: <5964e8a4.848ddf0a.1cb38.1577@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91862:ac0c77ebd152 Date: 2017-07-11 17:02 +0200 http://bitbucket.org/pypy/pypy/changeset/ac0c77ebd152/ Log: (fijal, arigo) fix walking stack roots diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -418,7 +418,8 @@ def __init__(self, config=None): "Basic initialization of objects." self.fromcache = InternalSpaceCache(self).getorbuild - self.threadlocals = ThreadLocals() + self.threadlocals = ThreadLocals() # this value is replaces + # if compiled with threads enabled, see pypy/module/thread/__init__.py # set recursion limit # sets all the internal descriptors if config is None: diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -96,14 +96,8 @@ return top self.decr_stack = decr_stack - def walk_stack_root(callback, start, end): - gc = self.gc - addr = end - while addr != start: - addr -= sizeofaddr - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - self.rootstackhook = walk_stack_root + self.invoke_collect_stack_root = specialize.call_location()( + lambda arg0, arg1, addr: arg0(self.gc, addr)) from rpython.rlib.debug import ll_assert, debug_print, debug_start, debug_stop def walk_thread_stack(collect_stack_root, tl): @@ -117,7 +111,9 @@ # without barriers) return debug_print("walk_stack", base, top) - self.rootstackhook(collect_stack_root, base, top) + walk_stack_root(self.invoke_collect_stack_root, collect_stack_root, + None, base, top, is_minor=False) + self._walk_thread_stack = 
walk_thread_stack def push_stack(self, addr): @@ -152,13 +148,6 @@ if tl_shadowstack.get_or_make_raw() == llmemory.NULL: allocate_shadow_stack() - - def thread_start(): - allocate_shadow_stack() - tl_synclock.get_or_make_raw() # reference the field at least once - - thread_start._always_inline_ = True - def allocate_shadow_stack(): root_stack_depth = 163840 root_stack_size = sizeofaddr * root_stack_depth @@ -182,7 +171,7 @@ self.thread_setup = thread_setup self.thread_run_ptr = getfn(thread_run, [], annmodel.s_None, minimal_transform=False) - self.thread_start_ptr = getfn(thread_start, [], annmodel.s_None, + self.thread_start_ptr = getfn(thread_setup, [], annmodel.s_None, minimal_transform=False) self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None, minimal_transform=False) From pypy.commits at gmail.com Tue Jul 11 11:07:21 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:21 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Update trx length on commit and abort only Message-ID: <5964e9a9.9aa0df0a.a2907.1896@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2094:1de39189c503 Date: 2017-07-10 16:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/1de39189c503/ Log: Update trx length on commit and abort only diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -347,7 +347,6 @@ } if (thread_local_for_logging != NULL) { - stm_transaction_length_handle_validation(thread_local_for_logging, needs_abort); stop_timer_and_publish_for_thread( thread_local_for_logging, STM_DURATION_VALIDATION); } @@ -1383,6 +1382,8 @@ s_mutex_unlock(); + stm_transaction_length_handle_validation(thread_local_for_logging, false); + stop_timer_and_publish_for_thread( thread_local_for_logging, STM_DURATION_COMMIT_EXCEPT_GC); @@ -1554,6 +1555,8 @@ did_abort = 1; #endif + stm_transaction_length_handle_validation(pseg->pub.running_thread, true); + 
list_clear(pseg->objects_pointing_to_nursery); list_clear(pseg->old_objects_with_cards_set); LIST_FOREACH_R(pseg->large_overflow_objects, uintptr_t /*item*/, From pypy.commits at gmail.com Tue Jul 11 11:07:23 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:23 -0700 (PDT) Subject: [pypy-commit] stmgc c8-efficient-serial-execution: Message-ID: <5964e9ab.3486df0a.7cbb1.188c@mx.google.com> Author: Tobias Weber Branch: c8-efficient-serial-execution Changeset: r2095:b61ffbe4c4a1 Date: 2017-07-10 16:56 +0200 http://bitbucket.org/pypy/stmgc/changeset/b61ffbe4c4a1/ Log: From pypy.commits at gmail.com Tue Jul 11 11:07:19 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:19 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Increase minimum trx length Message-ID: <5964e9a7.0f9ddf0a.30a67.1587@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2093:273315d7d544 Date: 2017-07-10 16:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/273315d7d544/ Log: Increase minimum trx length diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -21,8 +21,8 @@ #define LARGE_FILL_MARK_NURSERY_BYTES 0x1000000000L // #define LARGE_FILL_MARK_NURSERY_BYTES 0x1000000000000000L -// corresponds to ~7 bytes nursery fill -#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.0000000001) +// corresponds to ~700 bytes nursery fill +#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.00000001) static double get_new_transaction_length(stm_thread_local_t *tl, bool aborts) { const int multiplier = 100; @@ -550,7 +550,7 @@ pseg->pub.nursery_mark -= nursery_used; if (pseg->commit_if_not_atomic - // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? + // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? 
&& pseg->pub.running_thread->self_or_0_if_atomic != 0) { // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; From pypy.commits at gmail.com Tue Jul 11 11:07:26 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:26 -0700 (PDT) Subject: [pypy-commit] stmgc c8-fix-starvation: Fetch detached trx more often during wait to become inevitable Message-ID: <5964e9ae.c288df0a.d1bb5.10bb@mx.google.com> Author: Tobias Weber Branch: c8-fix-starvation Changeset: r2097:c9b8371e72df Date: 2017-07-10 17:11 +0200 http://bitbucket.org/pypy/stmgc/changeset/c9b8371e72df/ Log: Fetch detached trx more often during wait to become inevitable diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1571,7 +1571,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = 0; + int num_waits = 1; timing_become_inevitable(); @@ -1582,42 +1582,42 @@ if (msg != MSG_INEV_DONT_SLEEP) { dprintf(("become_inevitable: %s\n", msg)); - if (any_soon_finished_or_inevitable_thread_segment() && - num_waits <= NB_SEGMENTS) { + if (any_soon_finished_or_inevitable_thread_segment()) { #if STM_TESTS /* for tests: another transaction */ stm_abort_transaction(); /* is already inevitable, abort */ #endif + signal_commit_to_inevitable_transaction(); + s_mutex_lock(); if (any_soon_finished_or_inevitable_thread_segment() && - !safe_point_requested()) { - - signal_commit_to_inevitable_transaction(); + !safe_point_requested() && + num_waits <= NB_SEGMENTS) { /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); - if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { - s_mutex_unlock(); - /* try to detach another inevitable transaction, but - only after waiting a bit. This is necessary to avoid - deadlocks in some situations, which are hopefully - not too common. 
We don't want two threads constantly - detaching each other. */ - intptr_t detached = fetch_detached_transaction(); - if (detached != 0) { - EMIT_WAIT_DONE(); - commit_fetched_detached_transaction(detached); - } - goto retry_from_start; + if (cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { + num_waits++; } - num_waits++; } s_mutex_unlock(); + /* XXX try to detach another inevitable transaction, but + only after waiting a bit. This is necessary to avoid + deadlocks in some situations, which are hopefully + not too common. We don't want two threads constantly + detaching each other. */ + intptr_t detached = fetch_detached_transaction(); + if (detached != 0) { + EMIT_WAIT_DONE(); + commit_fetched_detached_transaction(detached); + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); + } goto retry_from_start; } else { EMIT_WAIT_DONE(); if (!_validate_and_turn_inevitable()) { + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); goto retry_from_start; } } diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -501,7 +501,7 @@ pseg->pub.nursery_mark -= nursery_used; if (pseg->commit_if_not_atomic - // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? + // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? 
&& pseg->pub.running_thread->self_or_0_if_atomic != 0) { // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; From pypy.commits at gmail.com Tue Jul 11 11:07:28 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:28 -0700 (PDT) Subject: [pypy-commit] stmgc c8-efficient-serial-execution-master: Add some commentary to enable/disable atomic Message-ID: <5964e9b0.9386df0a.37387.1910@mx.google.com> Author: Tobias Weber Branch: c8-efficient-serial-execution-master Changeset: r2098:6da61052f101 Date: 2017-07-10 17:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/6da61052f101/ Log: Add some commentary to enable/disable atomic diff --git a/c8/stm/detach.c b/c8/stm/detach.c --- a/c8/stm/detach.c +++ b/c8/stm/detach.c @@ -229,14 +229,18 @@ return STM_PSEGMENT->atomic_nesting_levels; } +// max intptr_t value is 7FFFFFFFFFFFFFFF on 64-bit => larger than 2 * huge value #define HUGE_INTPTR_VALUE 0x3000000000000000L void stm_enable_atomic(stm_thread_local_t *tl) { if (!stm_is_atomic(tl)) { + // do for outermost atomic block only tl->self_or_0_if_atomic = 0; /* increment 'nursery_mark' by HUGE_INTPTR_VALUE, so that - stm_should_break_transaction() returns always false */ + stm_should_break_transaction() returns always false. 
+ preserves the previous nursery_mark, unless it is < 0 + or >= huge value */ intptr_t mark = (intptr_t)STM_SEGMENT->nursery_mark; if (mark < 0) mark = 0; @@ -256,6 +260,7 @@ STM_PSEGMENT->atomic_nesting_levels--; if (STM_PSEGMENT->atomic_nesting_levels == 0) { + // revert changes by stm_enable_atomic only if we left the outermost atomic block tl->self_or_0_if_atomic = (intptr_t)tl; /* decrement 'nursery_mark' by HUGE_INTPTR_VALUE, to cancel what was done in stm_enable_atomic() */ From pypy.commits at gmail.com Tue Jul 11 11:07:24 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:24 -0700 (PDT) Subject: [pypy-commit] stmgc c8-efficient-serial-execution-master: Merge signal commit to inevitable transactions Message-ID: <5964e9ac.8a441c0a.417b6.415e@mx.google.com> Author: Tobias Weber Branch: c8-efficient-serial-execution-master Changeset: r2096:3990ee687ca5 Date: 2017-07-10 17:05 +0200 http://bitbucket.org/pypy/stmgc/changeset/3990ee687ca5/ Log: Merge signal commit to inevitable transactions diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -366,6 +366,14 @@ static void readd_wb_executed_flags(void); static void check_all_write_barrier_flags(char *segbase, struct list_s *list); +static void signal_commit_to_inevitable_transaction(void) { + struct stm_priv_segment_info_s* inevitable_segement = get_inevitable_thread_segment(); + if (inevitable_segement != 0) { + // the inevitable thread is still running: set its "please commit" flag (is ignored by the inevitable thread if it is atomic) + inevitable_segement->commit_if_not_atomic = true; + } +} + static void wait_for_inevitable(void) { intptr_t detached = 0; @@ -382,6 +390,8 @@ try to detach an inevitable transaction regularly */ detached = fetch_detached_transaction(); if (detached == 0) { + // the inevitable trx was not detached or it was detached but is atomic + signal_commit_to_inevitable_transaction(); EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); if 
(!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) goto wait_some_more; @@ -1578,44 +1588,42 @@ stm_abort_transaction(); /* is already inevitable, abort */ #endif - bool timed_out = false; - s_mutex_lock(); if (any_soon_finished_or_inevitable_thread_segment() && !safe_point_requested()) { + signal_commit_to_inevitable_transaction(); + /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); - if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, - 0.000054321)) - timed_out = true; + if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { + s_mutex_unlock(); + /* try to detach another inevitable transaction, but + only after waiting a bit. This is necessary to avoid + deadlocks in some situations, which are hopefully + not too common. We don't want two threads constantly + detaching each other. */ + intptr_t detached = fetch_detached_transaction(); + if (detached != 0) { + EMIT_WAIT_DONE(); + commit_fetched_detached_transaction(detached); + } + goto retry_from_start; + } + num_waits++; } s_mutex_unlock(); - - if (timed_out) { - /* try to detach another inevitable transaction, but - only after waiting a bit. This is necessary to avoid - deadlocks in some situations, which are hopefully - not too common. We don't want two threads constantly - detaching each other. */ - intptr_t detached = fetch_detached_transaction(); - if (detached != 0) { - EMIT_WAIT_DONE(); - commit_fetched_detached_transaction(detached); - } - } - else { - num_waits++; - } goto retry_from_start; } - EMIT_WAIT_DONE(); - if (!_validate_and_turn_inevitable()) - goto retry_from_start; + else { + EMIT_WAIT_DONE(); + if (!_validate_and_turn_inevitable()) { + goto retry_from_start; + } + } } - else { - if (!_validate_and_turn_inevitable()) - return; + else if (!_validate_and_turn_inevitable()) { + return; } /* There may be a concurrent commit of a detached Tx going on. 
@@ -1627,6 +1635,7 @@ stm_spin_loop(); assert(_stm_detached_inevitable_from_thread == 0); + STM_PSEGMENT->commit_if_not_atomic = false; soon_finished_or_inevitable_thread_segment(); STM_PSEGMENT->transaction_state = TS_INEVITABLE; diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -168,6 +168,9 @@ /* For stm_enable_atomic() */ uintptr_t atomic_nesting_levels; + + // TODO signal flag that is checked in throw_away_nursery() for making immediate commit + bool commit_if_not_atomic; }; enum /* safe_point */ { diff --git a/c8/stm/detach.c b/c8/stm/detach.c --- a/c8/stm/detach.c +++ b/c8/stm/detach.c @@ -215,6 +215,7 @@ } } +// TODO write tests, verify is working, verify no overflows with adaptive mode uintptr_t stm_is_atomic(stm_thread_local_t *tl) { assert(STM_SEGMENT->running_thread == tl); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -500,6 +500,13 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; + if (pseg->commit_if_not_atomic + // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? 
+ && pseg->pub.running_thread->self_or_0_if_atomic != 0) { + // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately + pseg->pub.nursery_mark = 0; + } + /* free any object left from 'young_outside_nursery' */ if (!tree_is_cleared(pseg->young_outside_nursery)) { wlog_t *item; diff --git a/c8/stm/sync.c b/c8/stm/sync.c --- a/c8/stm/sync.c +++ b/c8/stm/sync.c @@ -293,6 +293,19 @@ return false; } +static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void) +{ + struct stm_priv_segment_info_s* segment; + int num; + for (num = 1; num < NB_SEGMENTS; num++) { + segment = get_priv_segment(num); + if (segment->transaction_state == TS_INEVITABLE) { + return segment; + } + } + return 0; +} + __attribute__((unused)) static bool _seems_to_be_running_transaction(void) { diff --git a/c8/stm/sync.h b/c8/stm/sync.h --- a/c8/stm/sync.h +++ b/c8/stm/sync.h @@ -29,6 +29,7 @@ static void release_thread_segment(stm_thread_local_t *tl); static void soon_finished_or_inevitable_thread_segment(void); static bool any_soon_finished_or_inevitable_thread_segment(void); +static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, From pypy.commits at gmail.com Tue Jul 11 11:07:31 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:31 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Merge starvation fix Message-ID: <5964e9b3.82b5df0a.d2327.157a@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2100:66f838f8595b Date: 2017-07-11 13:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/66f838f8595b/ Log: Merge starvation fix diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1638,7 +1638,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = 0; + int num_waits = 1; timing_become_inevitable(); @@ -1649,42 +1649,42 @@ if (msg != 
MSG_INEV_DONT_SLEEP) { dprintf(("become_inevitable: %s\n", msg)); - if (any_soon_finished_or_inevitable_thread_segment() && - num_waits <= NB_SEGMENTS) { + if (any_soon_finished_or_inevitable_thread_segment()) { #if STM_TESTS /* for tests: another transaction */ stm_abort_transaction(); /* is already inevitable, abort */ #endif + signal_commit_to_inevitable_transaction(); + s_mutex_lock(); if (any_soon_finished_or_inevitable_thread_segment() && - !safe_point_requested()) { - - signal_commit_to_inevitable_transaction(); + !safe_point_requested() && + num_waits <= NB_SEGMENTS) { /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); - if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { - s_mutex_unlock(); - /* try to detach another inevitable transaction, but - only after waiting a bit. This is necessary to avoid - deadlocks in some situations, which are hopefully - not too common. We don't want two threads constantly - detaching each other. */ - intptr_t detached = fetch_detached_transaction(); - if (detached != 0) { - EMIT_WAIT_DONE(); - commit_fetched_detached_transaction(detached); - } - goto retry_from_start; + if (cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { + num_waits++; } - num_waits++; } s_mutex_unlock(); + /* XXX try to detach another inevitable transaction, but + only after waiting a bit. This is necessary to avoid + deadlocks in some situations, which are hopefully + not too common. We don't want two threads constantly + detaching each other. 
*/ + intptr_t detached = fetch_detached_transaction(); + if (detached != 0) { + EMIT_WAIT_DONE(); + commit_fetched_detached_transaction(detached); + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); + } goto retry_from_start; } else { EMIT_WAIT_DONE(); if (!_validate_and_turn_inevitable()) { + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); goto retry_from_start; } } diff --git a/c8/stm/detach.c b/c8/stm/detach.c --- a/c8/stm/detach.c +++ b/c8/stm/detach.c @@ -229,14 +229,18 @@ return STM_PSEGMENT->atomic_nesting_levels; } +// max intptr_t value is 7FFFFFFFFFFFFFFF on 64-bit => larger than 2 * huge value #define HUGE_INTPTR_VALUE 0x3000000000000000L void stm_enable_atomic(stm_thread_local_t *tl) { if (!stm_is_atomic(tl)) { + // do for outermost atomic block only tl->self_or_0_if_atomic = 0; /* increment 'nursery_mark' by HUGE_INTPTR_VALUE, so that - stm_should_break_transaction() returns always false */ + stm_should_break_transaction() returns always false. + preserves the previous nursery_mark, unless it is < 0 + or >= huge value */ intptr_t mark = (intptr_t)STM_SEGMENT->nursery_mark; if (mark < 0) mark = 0; @@ -256,6 +260,7 @@ STM_PSEGMENT->atomic_nesting_levels--; if (STM_PSEGMENT->atomic_nesting_levels == 0) { + // revert changes by stm_enable_atomic only if we left the outermost atomic block tl->self_or_0_if_atomic = (intptr_t)tl; /* decrement 'nursery_mark' by HUGE_INTPTR_VALUE, to cancel what was done in stm_enable_atomic() */ diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -543,7 +543,7 @@ pseg->pub.nursery_mark -= nursery_used; if (pseg->commit_if_not_atomic - // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? + // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? 
&& pseg->pub.running_thread->self_or_0_if_atomic != 0) { // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; From pypy.commits at gmail.com Tue Jul 11 11:07:34 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:34 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Merge instrumentation updates Message-ID: <5964e9b6.14b7df0a.39d22.1473@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2102:4f80bfd40482 Date: 2017-07-11 17:03 +0200 http://bitbucket.org/pypy/stmgc/changeset/4f80bfd40482/ Log: Merge instrumentation updates diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -359,9 +359,6 @@ static struct stm_commit_log_entry_s *_create_commit_log_entry(void) { /* puts all modified_old_objects in a new commit log entry */ - - start_timer(); - // we don't need the privatization lock, as we are only // reading from modified_old_objs and nobody but us can change it struct list_s *list = STM_PSEGMENT->modified_old_objects; @@ -375,8 +372,6 @@ result->written_count = count; memcpy(result->written, list->items, count * sizeof(struct stm_undo_s)); - stop_timer_and_publish(STM_DURATION_CREATE_CLE); - return result; } From pypy.commits at gmail.com Tue Jul 11 11:07:29 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:29 -0700 (PDT) Subject: [pypy-commit] stmgc c8-efficient-serial-execution-master: Merge fix for starvation when becoming inevitable Message-ID: <5964e9b1.010d1c0a.479a2.f890@mx.google.com> Author: Tobias Weber Branch: c8-efficient-serial-execution-master Changeset: r2099:1d6ca16fa0aa Date: 2017-07-11 13:12 +0200 http://bitbucket.org/pypy/stmgc/changeset/1d6ca16fa0aa/ Log: Merge fix for starvation when becoming inevitable diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1571,7 +1571,7 @@ void 
_stm_become_inevitable(const char *msg) { - int num_waits = 0; + int num_waits = 1; timing_become_inevitable(); @@ -1582,42 +1582,42 @@ if (msg != MSG_INEV_DONT_SLEEP) { dprintf(("become_inevitable: %s\n", msg)); - if (any_soon_finished_or_inevitable_thread_segment() && - num_waits <= NB_SEGMENTS) { + if (any_soon_finished_or_inevitable_thread_segment()) { #if STM_TESTS /* for tests: another transaction */ stm_abort_transaction(); /* is already inevitable, abort */ #endif + signal_commit_to_inevitable_transaction(); + s_mutex_lock(); if (any_soon_finished_or_inevitable_thread_segment() && - !safe_point_requested()) { - - signal_commit_to_inevitable_transaction(); + !safe_point_requested() && + num_waits <= NB_SEGMENTS) { /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); - if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { - s_mutex_unlock(); - /* try to detach another inevitable transaction, but - only after waiting a bit. This is necessary to avoid - deadlocks in some situations, which are hopefully - not too common. We don't want two threads constantly - detaching each other. */ - intptr_t detached = fetch_detached_transaction(); - if (detached != 0) { - EMIT_WAIT_DONE(); - commit_fetched_detached_transaction(detached); - } - goto retry_from_start; + if (cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { + num_waits++; } - num_waits++; } s_mutex_unlock(); + /* XXX try to detach another inevitable transaction, but + only after waiting a bit. This is necessary to avoid + deadlocks in some situations, which are hopefully + not too common. We don't want two threads constantly + detaching each other. 
*/ + intptr_t detached = fetch_detached_transaction(); + if (detached != 0) { + EMIT_WAIT_DONE(); + commit_fetched_detached_transaction(detached); + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); + } goto retry_from_start; } else { EMIT_WAIT_DONE(); if (!_validate_and_turn_inevitable()) { + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); goto retry_from_start; } } diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -501,7 +501,7 @@ pseg->pub.nursery_mark -= nursery_used; if (pseg->commit_if_not_atomic - // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? + // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? && pseg->pub.running_thread->self_or_0_if_atomic != 0) { // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; From pypy.commits at gmail.com Tue Jul 11 11:07:36 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:36 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Merge instrumentation updates Message-ID: <5964e9b8.5e361c0a.c2ee0.98cb@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2103:5d396c49837a Date: 2017-07-11 17:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/5d396c49837a/ Log: Merge instrumentation updates diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -358,9 +358,6 @@ static struct stm_commit_log_entry_s *_create_commit_log_entry(void) { /* puts all modified_old_objects in a new commit log entry */ - - start_timer(); - // we don't need the privatization lock, as we are only // reading from modified_old_objs and nobody but us can change it struct list_s *list = STM_PSEGMENT->modified_old_objects; @@ -374,8 +371,6 @@ result->written_count = count; memcpy(result->written, list->items, count * sizeof(struct stm_undo_s)); - 
stop_timer_and_publish(STM_DURATION_CREATE_CLE); - return result; } From pypy.commits at gmail.com Tue Jul 11 11:07:37 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:37 -0700 (PDT) Subject: [pypy-commit] stmgc c8-binary-trx-length-per-thread: Merge instrumentation updates Message-ID: <5964e9b9.456b1c0a.c795.4088@mx.google.com> Author: Tobias Weber Branch: c8-binary-trx-length-per-thread Changeset: r2104:c86068cb4b91 Date: 2017-07-11 17:05 +0200 http://bitbucket.org/pypy/stmgc/changeset/c86068cb4b91/ Log: Merge instrumentation updates diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -359,9 +359,6 @@ static struct stm_commit_log_entry_s *_create_commit_log_entry(void) { /* puts all modified_old_objects in a new commit log entry */ - - start_timer(); - // we don't need the privatization lock, as we are only // reading from modified_old_objs and nobody but us can change it struct list_s *list = STM_PSEGMENT->modified_old_objects; @@ -375,8 +372,6 @@ result->written_count = count; memcpy(result->written, list->items, count * sizeof(struct stm_undo_s)); - stop_timer_and_publish(STM_DURATION_CREATE_CLE); - return result; } From pypy.commits at gmail.com Tue Jul 11 11:07:33 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:33 -0700 (PDT) Subject: [pypy-commit] stmgc c8-overheads-instrumentation: Remove timing that was nested with the expanded validation timing Message-ID: <5964e9b5.d399df0a.625f2.15aa@mx.google.com> Author: Tobias Weber Branch: c8-overheads-instrumentation Changeset: r2101:8efefe78725b Date: 2017-07-11 17:02 +0200 http://bitbucket.org/pypy/stmgc/changeset/8efefe78725b/ Log: Remove timing that was nested with the expanded validation timing diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -358,9 +358,6 @@ static struct stm_commit_log_entry_s *_create_commit_log_entry(void) { /* puts all modified_old_objects in a new commit log 
entry */ - - start_timer(); - // we don't need the privatization lock, as we are only // reading from modified_old_objs and nobody but us can change it struct list_s *list = STM_PSEGMENT->modified_old_objects; @@ -374,8 +371,6 @@ result->written_count = count; memcpy(result->written, list->items, count * sizeof(struct stm_undo_s)); - stop_timer_and_publish(STM_DURATION_CREATE_CLE); - return result; } From pypy.commits at gmail.com Tue Jul 11 11:07:39 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:39 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-prolonged-backoff: Message-ID: <5964e9bb.d696df0a.3aa53.1af9@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-prolonged-backoff Changeset: r2105:925c3ad90c24 Date: 2017-07-11 17:05 +0200 http://bitbucket.org/pypy/stmgc/changeset/925c3ad90c24/ Log: From pypy.commits at gmail.com Tue Jul 11 11:07:40 2017 From: pypy.commits at gmail.com (tobweber) Date: Tue, 11 Jul 2017 08:07:40 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length: Message-ID: <5964e9bc.d0091c0a.28ca8.d7b9@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length Changeset: r2106:7230091fb9c5 Date: 2017-07-11 17:05 +0200 http://bitbucket.org/pypy/stmgc/changeset/7230091fb9c5/ Log: From pypy.commits at gmail.com Wed Jul 12 04:29:45 2017 From: pypy.commits at gmail.com (fijal) Date: Wed, 12 Jul 2017 01:29:45 -0700 (PDT) Subject: [pypy-commit] pypy nogil-unsafe-2: remove the gil Message-ID: <5965ddf9.925b1c0a.4df35.d05f@mx.google.com> Author: fijal Branch: nogil-unsafe-2 Changeset: r91863:7b7a6c3e6e26 Date: 2017-07-12 10:29 +0200 http://bitbucket.org/pypy/pypy/changeset/7b7a6c3e6e26/ Log: remove the gil diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h --- a/rpython/translator/c/src/thread.h +++ b/rpython/translator/c/src/thread.h @@ -75,22 +75,22 @@ SLOWPATH: signal "now at safepoint"; 111 -> 110 */ -#define _RPyGilAcquire() do { \ +#define 
_RPyGilAcquire() /*do { \ assert((__sync_fetch_and_add( \ &RPY_THREADLOCALREF_GET(synclock), 0) \ & 0b001) == 0b0); \ if (!__sync_bool_compare_and_swap( \ &RPY_THREADLOCALREF_GET(synclock), 0b100L, 0b101L)) \ RPyGilAcquireSlowPath(); \ - } while (0) + } while (0)*/ -#define _RPyGilRelease() do { \ +#define _RPyGilRelease() /*do { \ assert((__sync_fetch_and_add( \ &RPY_THREADLOCALREF_GET(synclock), 0) & 0b101) == 0b101); \ if (!__sync_bool_compare_and_swap( \ &RPY_THREADLOCALREF_GET(synclock), 0b101L, 0b100L)) \ RPyGilReleaseSlowPath(); \ - } while (0) + } while (0)*/ static inline long *_RPyFetchFastGil(void) { abort(); From pypy.commits at gmail.com Wed Jul 12 04:59:37 2017 From: pypy.commits at gmail.com (antocuni) Date: Wed, 12 Jul 2017 01:59:37 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add two more versions of the code Message-ID: <5965e4f9.c5b2df0a.5c542.a572@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5812:e138032bcc93 Date: 2017-07-12 08:54 +0200 http://bitbucket.org/pypy/extradoc/changeset/e138032bcc93/ Log: add two more versions of the code diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py --- a/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/benchall.py @@ -2,14 +2,14 @@ import time import pypytools from mplayer import mplayer -import v0, v1, v2, v3 +import v0, v1, v2, v3, v4, v5 def bench(): if len(sys.argv) == 2: v = sys.argv[1] versions = [globals()[v]] else: - versions = [v0, v1, v2, v3] + versions = [v0, v1, v2, v3, v4, v5] if pypytools.IS_PYPY: max_frames = 200 diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py --- a/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/main.py @@ -6,7 +6,7 @@ from mplayer import mplayer, view from math import sqrt import array -import v0, v1, v2, v3 +import v0, v1, v2, v3, v4, v5 def 
main(argv): if len(argv) > 1: @@ -20,6 +20,8 @@ #out = v1.sobel(img) #out = v2.sobel(img) #out = v3.sobel(img) + #out = v4.sobel(img) + #out = v5.sobel(img) try: view(out) diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/v4.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/v4.py new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/v4.py @@ -0,0 +1,40 @@ +import array +from math import sqrt +from v2 import Image + +class Kernel(object): + + def __init__(self, matrix): + self.height = len(matrix) + self.width = len(matrix[0]) + self.matrix = matrix + + def __call__(self, img, x, y): + value = 0.0 + for j, row in enumerate(self.matrix, -(self.height/2)): + for i, k in enumerate(row, -(self.width/2)): + value += img[x+i, y+j] * k + return value + + +Gx = Kernel([[-1.0, 0.0, +1.0], + [-2.0, 0.0, +2.0], + [-1.0, 0.0, +1.0]]) + +Gy = Kernel([[-1.0, -2.0, -1.0], + [0.0, 0.0, 0.0], + [+1.0, +2.0, +1.0]]) + +def sobel(img): + """ + Like v3, but with a generic Kernel class + """ + img = Image(*img) + out = Image(img.width, img.height) + for y in xrange(1, img.height-1): + for x in xrange(1, img.width-1): + dx = Gx(img, x, y) + dy = Gy(img, x, y) + value = min(int(sqrt(dx*dx + dy*dy) / 2.0), 255) + out[x, y] = value + return out diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/v5.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/v5.py new file mode 100644 --- /dev/null +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/v5.py @@ -0,0 +1,42 @@ +import array +from math import sqrt +from v2 import Image +from pypytools.codegen import Code + +def Kernel(matrix): + height = len(matrix) + width = len(matrix[0]) + code = Code() + with code.block('def apply(img, x, y):'): + code.w('value = 0.0') + for j, row in enumerate(matrix, -(height/2)): + for i, k in enumerate(row, -(width/2)): + if k == 0: + continue + code.w('value += img[x+{i}, y+{j}] * {k}', i=i, j=j, k=k) + code.w('return value') + # + code.compile() + return code['apply'] + +Gx = Kernel([[-1.0, 0.0, +1.0], + [-2.0, 
0.0, +2.0], + [-1.0, 0.0, +1.0]]) + +Gy = Kernel([[-1.0, -2.0, -1.0], + [0.0, 0.0, 0.0], + [+1.0, +2.0, +1.0]]) + +def sobel(img): + """ + Like v3, but with a generic Kernel class + """ + img = Image(*img) + out = Image(img.width, img.height) + for y in xrange(1, img.height-1): + for x in xrange(1, img.width-1): + dx = Gx(img, x, y) + dy = Gy(img, x, y) + value = min(int(sqrt(dx*dx + dy*dy) / 2.0), 255) + out[x, y] = value + return out From pypy.commits at gmail.com Wed Jul 12 04:59:38 2017 From: pypy.commits at gmail.com (antocuni) Date: Wed, 12 Jul 2017 01:59:38 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: make v4 and v5 more similar to v3 Message-ID: <5965e4fa.9386df0a.5bac9.e09f@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5813:6ed1dcda3cd7 Date: 2017-07-12 10:32 +0200 http://bitbucket.org/pypy/extradoc/changeset/6ed1dcda3cd7/ Log: make v4 and v5 more similar to v3 diff --git a/talk/ep2017/the-joy-of-pypy-jit/sobel/v4.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/v4.py --- a/talk/ep2017/the-joy-of-pypy-jit/sobel/v4.py +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/v4.py @@ -1,6 +1,6 @@ import array from math import sqrt -from v2 import Image +from v3 import Image class Kernel(object): @@ -9,11 +9,11 @@ self.width = len(matrix[0]) self.matrix = matrix - def __call__(self, img, x, y): + def __call__(self, img, p): value = 0.0 for j, row in enumerate(self.matrix, -(self.height/2)): for i, k in enumerate(row, -(self.width/2)): - value += img[x+i, y+j] * k + value += img[p + (i, j)] * k return value @@ -31,10 +31,9 @@ """ img = Image(*img) out = Image(img.width, img.height) - for y in xrange(1, img.height-1): - for x in xrange(1, img.width-1): - dx = Gx(img, x, y) - dy = Gy(img, x, y) - value = min(int(sqrt(dx*dx + dy*dy) / 2.0), 255) - out[x, y] = value + for p in img.noborder(): + dx = Gx(img, p) + dy = Gy(img, p) + value = min(int(sqrt(dx*dx + dy*dy) / 2.0), 255) + out[p] = value return out diff --git 
a/talk/ep2017/the-joy-of-pypy-jit/sobel/v5.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/v5.py --- a/talk/ep2017/the-joy-of-pypy-jit/sobel/v5.py +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/v5.py @@ -1,19 +1,19 @@ import array from math import sqrt -from v2 import Image +from v3 import Image from pypytools.codegen import Code def Kernel(matrix): height = len(matrix) width = len(matrix[0]) code = Code() - with code.block('def apply(img, x, y):'): + with code.block('def apply(img, p):'): code.w('value = 0.0') for j, row in enumerate(matrix, -(height/2)): for i, k in enumerate(row, -(width/2)): if k == 0: continue - code.w('value += img[x+{i}, y+{j}] * {k}', i=i, j=j, k=k) + code.w('value += img[p+{delta}] * {k}', delta=(i, j), k=k) code.w('return value') # code.compile() @@ -29,14 +29,13 @@ def sobel(img): """ - Like v3, but with a generic Kernel class + Like v4, but unrolling the Kernel loop """ img = Image(*img) out = Image(img.width, img.height) - for y in xrange(1, img.height-1): - for x in xrange(1, img.width-1): - dx = Gx(img, x, y) - dy = Gy(img, x, y) - value = min(int(sqrt(dx*dx + dy*dy) / 2.0), 255) - out[x, y] = value + for p in img.noborder(): + dx = Gx(img, p) + dy = Gy(img, p) + value = min(int(sqrt(dx*dx + dy*dy) / 2.0), 255) + out[p] = value return out From pypy.commits at gmail.com Wed Jul 12 04:59:40 2017 From: pypy.commits at gmail.com (antocuni) Date: Wed, 12 Jul 2017 01:59:40 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: slides about v4 Message-ID: <5965e4fc.e386df0a.bd601.d7c9@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5814:0383fac69aac Date: 2017-07-12 10:50 +0200 http://bitbucket.org/pypy/extradoc/changeset/0383fac69aac/ Log: slides about v4 diff --git a/talk/ep2017/the-joy-of-pypy-jit/bazinga.jpg b/talk/ep2017/the-joy-of-pypy-jit/bazinga.jpg new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..af860005ed4c9c216800c877ce79653d81e89219 GIT binary patch [cut] diff --git 
a/talk/ep2017/the-joy-of-pypy-jit/sobel/chart.py b/talk/ep2017/the-joy-of-pypy-jit/sobel/chart.py --- a/talk/ep2017/the-joy-of-pypy-jit/sobel/chart.py +++ b/talk/ep2017/the-joy-of-pypy-jit/sobel/chart.py @@ -1,9 +1,8 @@ import numpy as np import matplotlib.pyplot as plt -N = 5 -CPYTHON = (4.99, 2.85, 1.62, 0.59) -PYPY = (288.67, 278.25, 276.81, 235.91) +CPYTHON = (4.99, 2.85, 1.62, 0.59, 0.33, 0.57) +PYPY = (288.67, 278.25, 276.81, 256.79, 25.31, 244.36) def draw(title, values, allvalues, color): filename = '%s-v%d.png' % (title, len(values)-1) @@ -30,6 +29,5 @@ draw('PyPy', PYPY[:i], PYPY, color='r') draw('CPython', CPYTHON[:i], CPYTHON, color='b') - -## for i, (cpy, pypy) in enumerate(zip(CPYTHON, PYPY)): -## print i, pypy/cpy +for i, (cpy, pypy) in enumerate(zip(CPYTHON, PYPY)): + print i, pypy/cpy diff --git a/talk/ep2017/the-joy-of-pypy-jit/talk.rst b/talk/ep2017/the-joy-of-pypy-jit/talk.rst --- a/talk/ep2017/the-joy-of-pypy-jit/talk.rst +++ b/talk/ep2017/the-joy-of-pypy-jit/talk.rst @@ -322,9 +322,78 @@ |end_columns| -* PyPy is ~400x faster +* PyPy is ~435x faster +Version 4 +---------- + +|tiny| + +.. sourcecode:: python + + class Kernel(object): + + def __init__(self, matrix): + self.height = len(matrix) + self.width = len(matrix[0]) + self.matrix = matrix + + def __call__(self, img, p): + value = 0.0 + for j, row in enumerate(self.matrix, -(self.height/2)): + for i, k in enumerate(row, -(self.width/2)): + value += img[p + (i, j)] * k + return value + + + Gx = Kernel([[-1.0, 0.0, +1.0], + [-2.0, 0.0, +2.0], + [-1.0, 0.0, +1.0]]) + + Gy = Kernel([[-1.0, -2.0, -1.0], + [0.0, 0.0, 0.0], + [+1.0, +2.0, +1.0]]) + + + def sobel(img): + ... + dx = Gx(img, p) + dy = Gy(img, p) + ... + +|end_tiny| + +Version 4 +---------- + +|column1| + +.. image:: sobel/CPython-v4.png + :scale: 30% + +|column2| + +.. 
image:: sobel/PyPy-v4.png + :scale: 30% + +|end_columns| + +* PyPy massively slower :( + + - (still 76x faster than CPython) + +* I'm a liar + +* PyPy sucks + +Wait +----------------------- + +.. image:: bazinga.jpg + :scale: 30% + :align: center + The cost of abstraction ------------------------ @@ -338,7 +407,7 @@ - abstractions (almost) for free - - v3 is ~20% slower than v0, v1, v2 + - v5 is ~20% slower than v0, v1, v2 PyPy JIT 101 @@ -602,9 +671,9 @@ * "PyPy meets Python 3 and numpy" - - Armin Rigo + - Armin Rigo - - Friday, 14:00 + - Friday, 14:00 * Or, just talk to us :) From pypy.commits at gmail.com Wed Jul 12 04:59:42 2017 From: pypy.commits at gmail.com (antocuni) Date: Wed, 12 Jul 2017 01:59:42 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add slides about v5 Message-ID: <5965e4fe.5e361c0a.f4e2f.df3b@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5815:f172adc02a04 Date: 2017-07-12 10:59 +0200 http://bitbucket.org/pypy/extradoc/changeset/f172adc02a04/ Log: add slides about v5 diff --git a/talk/ep2017/the-joy-of-pypy-jit/talk.rst b/talk/ep2017/the-joy-of-pypy-jit/talk.rst --- a/talk/ep2017/the-joy-of-pypy-jit/talk.rst +++ b/talk/ep2017/the-joy-of-pypy-jit/talk.rst @@ -394,6 +394,74 @@ :scale: 30% :align: center + +Version 5 +---------- + +|scriptsize| + +.. sourcecode:: python + + from pypytools.codegen import Code + + def Kernel(matrix): + height = len(matrix) + width = len(matrix[0]) + code = Code() + with code.block('def apply(img, p):'): + code.w('value = 0.0') + for j, row in enumerate(matrix, -(height/2)): + for i, k in enumerate(row, -(width/2)): + if k == 0: + continue + code.w('value += img[p+{delta}] * {k}', + delta=(i, j), k=k) + code.w('return value') + + code.compile() + return code['apply'] + +|end_scriptsize| + +Version 5 +---------- + +|scriptsize| + +.. 
sourcecode:: python + + # GENERATED CODE + + def apply(img, p): + value = 0.0 + value += img[p+(-1, -1)] * -1.0 + value += img[p+(1, -1)] * 1.0 + value += img[p+(-1, 0)] * -2.0 + value += img[p+(1, 0)] * 2.0 + value += img[p+(-1, 1)] * -1.0 + value += img[p+(1, 1)] * 1.0 + return value + +|end_scriptsize| + +Version 5 +---------- + +|column1| + +.. image:: sobel/CPython-v5.png + :scale: 30% + +|column2| + +.. image:: sobel/PyPy-v5.png + :scale: 30% + +|end_columns| + +* PyPy ~428x faster again + + The cost of abstraction ------------------------ @@ -407,7 +475,7 @@ - abstractions (almost) for free - - v5 is ~20% slower than v0, v1, v2 + - v5 is ~18% slower than v0, v1, v2 PyPy JIT 101 @@ -417,7 +485,15 @@ * Which code is optimized away? +* Very rough explanation + +* For a deeper view: + - http://speakerdeck.com/u/antocuni/p/pypy-jit-under-the-hood + + - http://www.youtube.com/watch?v=cMtBUvORCfU + + Loops and guards ----------------- @@ -677,3 +753,4 @@ * Or, just talk to us :) +* @pypyproject, @antocuni From pypy.commits at gmail.com Wed Jul 12 08:53:57 2017 From: pypy.commits at gmail.com (antocuni) Date: Wed, 12 Jul 2017 05:53:57 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add a link Message-ID: <59661be5.028b1c0a.56824.4348@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5816:d21eac1e437a Date: 2017-07-12 14:53 +0200 http://bitbucket.org/pypy/extradoc/changeset/d21eac1e437a/ Log: add a link diff --git a/talk/ep2017/the-joy-of-pypy-jit/talk.rst b/talk/ep2017/the-joy-of-pypy-jit/talk.rst --- a/talk/ep2017/the-joy-of-pypy-jit/talk.rst +++ b/talk/ep2017/the-joy-of-pypy-jit/talk.rst @@ -15,6 +15,9 @@ - http://antocuni.eu +- Source code of this demo: + + - https://bitbucket.org/pypy/extradoc/src/extradoc/talk/ep2017/the-joy-of-pypy-jit/ General question ---------------- From pypy.commits at gmail.com Thu Jul 13 08:30:51 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 13 Jul 2017 05:30:51 -0700 (PDT) Subject: [pypy-commit] pypy 
py3.5: Fix for Linux distributions on which os.confstr('CS_GNU_xxx') doesn't work Message-ID: <596767fb.16a5df0a.87aa1.1838@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91864:3d878f5a314f Date: 2017-07-13 14:30 +0200 http://bitbucket.org/pypy/pypy/changeset/3d878f5a314f/ Log: Fix for Linux distributions on which os.confstr('CS_GNU_xxx') doesn't work diff --git a/pypy/module/sys/system.py b/pypy/module/sys/system.py --- a/pypy/module/sys/system.py +++ b/pypy/module/sys/system.py @@ -113,17 +113,19 @@ if not space.config.objspace.usemodules.thread: return None from rpython.rlib import rthread + w_version = space.w_None if rthread.RPYTHREAD_NAME == "pthread": w_lock = space.newtext("semaphore" if rthread.USE_SEMAPHORES else "mutex+cond") if rthread.CS_GNU_LIBPTHREAD_VERSION is not None: - w_version = space.newtext( - os.confstr(rthread.CS_GNU_LIBPTHREAD_VERSION)) - else: - w_version = space.w_None + try: + name = os.confstr(rthread.CS_GNU_LIBPTHREAD_VERSION) + except OSError: + pass + else: + w_version = space.newtext(name) else: w_lock = space.w_None - w_version = space.w_None info_w = [ space.newtext(rthread.RPYTHREAD_NAME), w_lock, w_version, From pypy.commits at gmail.com Thu Jul 13 10:49:59 2017 From: pypy.commits at gmail.com (rlamy) Date: Thu, 13 Jul 2017 07:49:59 -0700 (PDT) Subject: [pypy-commit] pypy vendor/stdlib-3.6: Update stdlib to v3.6.1 Message-ID: <59678897.925b1c0a.4df35.cf0c@mx.google.com> Author: Ronan Lamy Branch: vendor/stdlib-3.6 Changeset: r91865:442dbbc53c68 Date: 2017-07-13 16:40 +0200 http://bitbucket.org/pypy/pypy/changeset/442dbbc53c68/ Log: Update stdlib to v3.6.1 diff too long, truncating to 2000 out of 7640 lines diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -277,7 +277,7 @@ try: UnsupportedOperation = io.UnsupportedOperation except AttributeError: - class UnsupportedOperation(ValueError, OSError): + class UnsupportedOperation(OSError, ValueError): 
pass diff --git a/lib-python/3/aifc.py b/lib-python/3/aifc.py --- a/lib-python/3/aifc.py +++ b/lib-python/3/aifc.py @@ -303,6 +303,8 @@ # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk # _framesize -- size of one frame in the file + _file = None # Set here since __del__ checks it + def initfp(self, file): self._version = 0 self._convert = None @@ -344,9 +346,15 @@ def __init__(self, f): if isinstance(f, str): - f = builtins.open(f, 'rb') - # else, assume it is an open file object already - self.initfp(f) + file_object = builtins.open(f, 'rb') + try: + self.initfp(file_object) + except: + file_object.close() + raise + else: + # assume it is an open file object already + self.initfp(f) def __enter__(self): return self @@ -541,18 +549,23 @@ # _datalength -- the size of the audio samples written to the header # _datawritten -- the size of the audio samples actually written + _file = None # Set here since __del__ checks it + def __init__(self, f): if isinstance(f, str): - filename = f - f = builtins.open(f, 'wb') + file_object = builtins.open(f, 'wb') + try: + self.initfp(file_object) + except: + file_object.close() + raise + + # treat .aiff file extensions as non-compressed audio + if f.endswith('.aiff'): + self._aifc = 0 else: - # else, assume it is an open file object already - filename = '???' 
- self.initfp(f) - if filename[-5:] == '.aiff': - self._aifc = 0 - else: - self._aifc = 1 + # assume it is an open file object already + self.initfp(f) def initfp(self, file): self._file = file diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -182,7 +182,7 @@ self._root_section = self._Section(self, None) self._current_section = self._root_section - self._whitespace_matcher = _re.compile(r'\s+') + self._whitespace_matcher = _re.compile(r'\s+', _re.ASCII) self._long_break_matcher = _re.compile(r'\n\n\n+') # =============================== diff --git a/lib-python/3/asyncio/events.py b/lib-python/3/asyncio/events.py --- a/lib-python/3/asyncio/events.py +++ b/lib-python/3/asyncio/events.py @@ -11,6 +11,7 @@ import functools import inspect +import os import reprlib import socket import subprocess @@ -611,6 +612,9 @@ # A TLS for the running event loop, used by _get_running_loop. class _RunningLoop(threading.local): _loop = None + _pid = None + + _running_loop = _RunningLoop() @@ -620,7 +624,9 @@ This is a low-level function intended to be used by event loops. This function is thread-specific. """ - return _running_loop._loop + running_loop = _running_loop._loop + if running_loop is not None and _running_loop._pid == os.getpid(): + return running_loop def _set_running_loop(loop): @@ -629,6 +635,7 @@ This is a low-level function intended to be used by event loops. This function is thread-specific. 
""" + _running_loop._pid = os.getpid() _running_loop._loop = loop diff --git a/lib-python/3/asyncio/subprocess.py b/lib-python/3/asyncio/subprocess.py --- a/lib-python/3/asyncio/subprocess.py +++ b/lib-python/3/asyncio/subprocess.py @@ -24,6 +24,8 @@ self._limit = limit self.stdin = self.stdout = self.stderr = None self._transport = None + self._process_exited = False + self._pipe_fds = [] def __repr__(self): info = [self.__class__.__name__] @@ -43,12 +45,14 @@ self.stdout = streams.StreamReader(limit=self._limit, loop=self._loop) self.stdout.set_transport(stdout_transport) + self._pipe_fds.append(1) stderr_transport = transport.get_pipe_transport(2) if stderr_transport is not None: self.stderr = streams.StreamReader(limit=self._limit, loop=self._loop) self.stderr.set_transport(stderr_transport) + self._pipe_fds.append(2) stdin_transport = transport.get_pipe_transport(0) if stdin_transport is not None: @@ -86,9 +90,18 @@ else: reader.set_exception(exc) + if fd in self._pipe_fds: + self._pipe_fds.remove(fd) + self._maybe_close_transport() + def process_exited(self): - self._transport.close() - self._transport = None + self._process_exited = True + self._maybe_close_transport() + + def _maybe_close_transport(self): + if len(self._pipe_fds) == 0 and self._process_exited: + self._transport.close() + self._transport = None class Process: diff --git a/lib-python/3/asyncio/tasks.py b/lib-python/3/asyncio/tasks.py --- a/lib-python/3/asyncio/tasks.py +++ b/lib-python/3/asyncio/tasks.py @@ -487,7 +487,8 @@ """ warnings.warn("asyncio.async() function is deprecated, use ensure_future()", - DeprecationWarning) + DeprecationWarning, + stacklevel=2) return ensure_future(coro_or_future, loop=loop) diff --git a/lib-python/3/asyncio/test_utils.py b/lib-python/3/asyncio/test_utils.py --- a/lib-python/3/asyncio/test_utils.py +++ b/lib-python/3/asyncio/test_utils.py @@ -449,12 +449,15 @@ self.set_event_loop(loop) return loop + def unpatch_get_running_loop(self): + 
events._get_running_loop = self._get_running_loop + def setUp(self): self._get_running_loop = events._get_running_loop events._get_running_loop = lambda: None def tearDown(self): - events._get_running_loop = self._get_running_loop + self.unpatch_get_running_loop() events.set_event_loop(None) diff --git a/lib-python/3/base64.py b/lib-python/3/base64.py --- a/lib-python/3/base64.py +++ b/lib-python/3/base64.py @@ -541,7 +541,8 @@ def encodestring(s): """Legacy alias of encodebytes().""" import warnings - warnings.warn("encodestring() is a deprecated alias, use encodebytes()", + warnings.warn("encodestring() is a deprecated alias since 3.1, " + "use encodebytes()", DeprecationWarning, 2) return encodebytes(s) @@ -554,7 +555,8 @@ def decodestring(s): """Legacy alias of decodebytes().""" import warnings - warnings.warn("decodestring() is a deprecated alias, use decodebytes()", + warnings.warn("decodestring() is a deprecated alias since Python 3.1, " + "use decodebytes()", DeprecationWarning, 2) return decodebytes(s) diff --git a/lib-python/3/collections/__init__.py b/lib-python/3/collections/__init__.py --- a/lib-python/3/collections/__init__.py +++ b/lib-python/3/collections/__init__.py @@ -189,6 +189,7 @@ link = self.__map[key] link_prev = link.prev link_next = link.next + soft_link = link_next.prev link_prev.next = link_next link_next.prev = link_prev root = self.__root @@ -196,12 +197,14 @@ last = root.prev link.prev = last link.next = root - last.next = root.prev = link + root.prev = soft_link + last.next = link else: first = root.next link.prev = root link.next = first - root.next = first.prev = link + first.prev = soft_link + root.next = link def __sizeof__(self): sizeof = _sys.getsizeof diff --git a/lib-python/3/configparser.py b/lib-python/3/configparser.py --- a/lib-python/3/configparser.py +++ b/lib-python/3/configparser.py @@ -143,6 +143,7 @@ import functools import io import itertools +import os import re import sys import warnings @@ -687,7 +688,7 @@ 
Return list of successfully read files. """ - if isinstance(filenames, str): + if isinstance(filenames, (str, os.PathLike)): filenames = [filenames] read_ok = [] for filename in filenames: @@ -696,6 +697,8 @@ self._read(fp, filename) except OSError: continue + if isinstance(filename, os.PathLike): + filename = os.fspath(filename) read_ok.append(filename) return read_ok diff --git a/lib-python/3/contextlib.py b/lib-python/3/contextlib.py --- a/lib-python/3/contextlib.py +++ b/lib-python/3/contextlib.py @@ -105,7 +105,7 @@ # raised inside the "with" statement from being suppressed. return exc is not value except RuntimeError as exc: - # Don't re-raise the passed in exception. (issue27112) + # Don't re-raise the passed in exception. (issue27122) if exc is value: return False # Likewise, avoid suppressing if a StopIteration exception diff --git a/lib-python/3/ctypes/__init__.py b/lib-python/3/ctypes/__init__.py --- a/lib-python/3/ctypes/__init__.py +++ b/lib-python/3/ctypes/__init__.py @@ -324,6 +324,10 @@ """ _func_flags_ = _FUNCFLAG_CDECL _func_restype_ = c_int + # default values for repr + _name = '' + _handle = 0 + _FuncPtr = None def __init__(self, name, mode=DEFAULT_MODE, handle=None, use_errno=False, diff --git a/lib-python/3/ctypes/test/test_callbacks.py b/lib-python/3/ctypes/test/test_callbacks.py --- a/lib-python/3/ctypes/test/test_callbacks.py +++ b/lib-python/3/ctypes/test/test_callbacks.py @@ -244,6 +244,7 @@ def test_callback_large_struct(self): class Check: pass + # This should mirror the structure in Modules/_ctypes/_ctypes_test.c class X(Structure): _fields_ = [ ('first', c_ulong), @@ -255,6 +256,11 @@ check.first = s.first check.second = s.second check.third = s.third + # See issue #29565. 
+ # The structure should be passed by value, so + # any changes to it should not be reflected in + # the value passed + s.first = s.second = s.third = 0x0badf00d check = Check() s = X() @@ -275,6 +281,11 @@ self.assertEqual(check.first, 0xdeadbeef) self.assertEqual(check.second, 0xcafebabe) self.assertEqual(check.third, 0x0bad1dea) + # See issue #29565. + # Ensure that the original struct is unchanged. + self.assertEqual(s.first, check.first) + self.assertEqual(s.second, check.second) + self.assertEqual(s.third, check.third) ################################################################ diff --git a/lib-python/3/ctypes/test/test_structures.py b/lib-python/3/ctypes/test/test_structures.py --- a/lib-python/3/ctypes/test/test_structures.py +++ b/lib-python/3/ctypes/test/test_structures.py @@ -3,6 +3,7 @@ from ctypes.test import need_symbol from struct import calcsize import _testcapi +import _ctypes_test class SubclassesTest(unittest.TestCase): def test_subclass(self): @@ -391,6 +392,28 @@ (1, 0, 0, 0, 0, 0)) self.assertRaises(TypeError, lambda: Z(1, 2, 3, 4, 5, 6, 7)) + def test_pass_by_value(self): + # This should mirror the structure in Modules/_ctypes/_ctypes_test.c + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + dll = CDLL(_ctypes_test.__file__) + func = dll._testfunc_large_struct_update_value + func.argtypes = (X,) + func.restype = None + func(s) + self.assertEqual(s.first, 0xdeadbeef) + self.assertEqual(s.second, 0xcafebabe) + self.assertEqual(s.third, 0x0bad1dea) + class PointerMemberTestCase(unittest.TestCase): def test(self): diff --git a/lib-python/3/curses/ascii.py b/lib-python/3/curses/ascii.py --- a/lib-python/3/curses/ascii.py +++ b/lib-python/3/curses/ascii.py @@ -53,19 +53,19 @@ def isalnum(c): return isalpha(c) or isdigit(c) def isalpha(c): return isupper(c) or islower(c) -def isascii(c): return _ctoi(c) <= 
127 # ? +def isascii(c): return 0 <= _ctoi(c) <= 127 # ? def isblank(c): return _ctoi(c) in (9, 32) -def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127 -def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57 -def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126 -def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122 -def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126 +def iscntrl(c): return 0 <= _ctoi(c) <= 31 or _ctoi(c) == 127 +def isdigit(c): return 48 <= _ctoi(c) <= 57 +def isgraph(c): return 33 <= _ctoi(c) <= 126 +def islower(c): return 97 <= _ctoi(c) <= 122 +def isprint(c): return 32 <= _ctoi(c) <= 126 def ispunct(c): return isgraph(c) and not isalnum(c) def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32) -def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90 +def isupper(c): return 65 <= _ctoi(c) <= 90 def isxdigit(c): return isdigit(c) or \ - (_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102) -def isctrl(c): return _ctoi(c) < 32 + (65 <= _ctoi(c) <= 70) or (97 <= _ctoi(c) <= 102) +def isctrl(c): return 0 <= _ctoi(c) < 32 def ismeta(c): return _ctoi(c) > 127 def ascii(c): diff --git a/lib-python/3/curses/textpad.py b/lib-python/3/curses/textpad.py --- a/lib-python/3/curses/textpad.py +++ b/lib-python/3/curses/textpad.py @@ -43,16 +43,20 @@ def __init__(self, win, insert_mode=False): self.win = win self.insert_mode = insert_mode - (self.maxy, self.maxx) = win.getmaxyx() - self.maxy = self.maxy - 1 - self.maxx = self.maxx - 1 + self._update_max_yx() self.stripspaces = 1 self.lastcmd = None win.keypad(1) + def _update_max_yx(self): + maxy, maxx = self.win.getmaxyx() + self.maxy = maxy - 1 + self.maxx = maxx - 1 + def _end_of_line(self, y): """Go to the location of the first blank on the given line, returning the index of the last non-blank character.""" + self._update_max_yx() last = self.maxx while True: if curses.ascii.ascii(self.win.inch(y, last)) != curses.ascii.SP: @@ -64,8 +68,10 @@ return last def 
_insert_printable_char(self, ch): + self._update_max_yx() (y, x) = self.win.getyx() - if y < self.maxy or x < self.maxx: + backyx = None + while y < self.maxy or x < self.maxx: if self.insert_mode: oldch = self.win.inch() # The try-catch ignores the error we trigger from some curses @@ -75,14 +81,20 @@ self.win.addch(ch) except curses.error: pass - if self.insert_mode: - (backy, backx) = self.win.getyx() - if curses.ascii.isprint(oldch): - self._insert_printable_char(oldch) - self.win.move(backy, backx) + if not self.insert_mode or not curses.ascii.isprint(oldch): + break + ch = oldch + (y, x) = self.win.getyx() + # Remember where to put the cursor back since we are in insert_mode + if backyx is None: + backyx = y, x + + if backyx is not None: + self.win.move(*backyx) def do_command(self, ch): "Process a single editing command." + self._update_max_yx() (y, x) = self.win.getyx() self.lastcmd = ch if curses.ascii.isprint(ch): @@ -148,6 +160,7 @@ def gather(self): "Collect and return the contents of the window." result = "" + self._update_max_yx() for y in range(self.maxy+1): self.win.move(y, 0) stop = self._end_of_line(y) diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -1053,7 +1053,7 @@ hour, minute (required) second, microsecond (default to zero) tzinfo (default to None) - fold (keyword only, default to True) + fold (keyword only, default to zero) """ if isinstance(hour, bytes) and len(hour) == 6 and hour[0]&0x7F < 24: # Pickle support diff --git a/lib-python/3/dbm/dumb.py b/lib-python/3/dbm/dumb.py --- a/lib-python/3/dbm/dumb.py +++ b/lib-python/3/dbm/dumb.py @@ -97,8 +97,9 @@ try: f = _io.open(self._dirfile, 'r', encoding="Latin-1") except OSError: - pass + self._modified = not self._readonly else: + self._modified = False with f: for line in f: line = line.rstrip() @@ -113,7 +114,7 @@ # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). 
Therefore we must never reference a # global in this routine. - if self._index is None: + if self._index is None or not self._modified: return # nothing to do try: @@ -197,6 +198,7 @@ elif not isinstance(val, (bytes, bytearray)): raise TypeError("values must be bytes or strings") self._verify_open() + self._modified = True if key not in self._index: self._addkey(key, self._addval(val)) else: @@ -229,6 +231,7 @@ if isinstance(key, str): key = key.encode('utf-8') self._verify_open() + self._modified = True # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always diff --git a/lib-python/3/distutils/command/wininst-14.0-amd64.exe b/lib-python/3/distutils/command/wininst-14.0-amd64.exe index 22299543a97ffc1525a3b1c778cb158d6c6430ad..253c2e2eccefa79393827f44f85680536906574a GIT binary patch [cut] diff --git a/lib-python/3/distutils/command/wininst-14.0.exe b/lib-python/3/distutils/command/wininst-14.0.exe index 0dac1103d98db0af1e9027c41fe921136c5f6396..46f5f356676c800f99742deb6bf4c0a96aa166c0 GIT binary patch [cut] diff --git a/lib-python/3/distutils/tests/test_bdist_rpm.py b/lib-python/3/distutils/tests/test_bdist_rpm.py --- a/lib-python/3/distutils/tests/test_bdist_rpm.py +++ b/lib-python/3/distutils/tests/test_bdist_rpm.py @@ -94,7 +94,7 @@ @unittest.skipIf(find_executable('rpmbuild') is None, 'the rpmbuild command is not found') def test_no_optimize_flag(self): - # let's create a package that brakes bdist_rpm + # let's create a package that breaks bdist_rpm tmp_dir = self.mkdtemp() os.environ['HOME'] = tmp_dir # to confine dir '.rpmdb' creation pkg_dir = os.path.join(tmp_dir, 'foo') diff --git a/lib-python/3/enum.py b/lib-python/3/enum.py --- a/lib-python/3/enum.py +++ b/lib-python/3/enum.py @@ -1,7 +1,7 @@ import sys from types import MappingProxyType, DynamicClassAttribute from functools import reduce -from operator import or_ as _or_, and_ as _and_, xor, neg +from operator import or_ as 
_or_ # try _collections first to reduce startup cost try: @@ -690,7 +690,9 @@ pseudo_member = object.__new__(cls) pseudo_member._name_ = None pseudo_member._value_ = value - cls._value2member_map_[value] = pseudo_member + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) return pseudo_member def __contains__(self, other): @@ -785,7 +787,9 @@ pseudo_member = int.__new__(cls, value) pseudo_member._name_ = None pseudo_member._value_ = value - cls._value2member_map_[value] = pseudo_member + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) return pseudo_member def __or__(self, other): @@ -835,18 +839,21 @@ # _decompose is only called if the value is not named not_covered = value negative = value < 0 + # issue29167: wrap accesses to _value2member_map_ in a list to avoid race + # conditions between iterating over it and having more psuedo- + # members added to it if negative: # only check for named flags flags_to_check = [ (m, v) - for v, m in flag._value2member_map_.items() + for v, m in list(flag._value2member_map_.items()) if m.name is not None ] else: # check for named flags and powers-of-two flags flags_to_check = [ (m, v) - for v, m in flag._value2member_map_.items() + for v, m in list(flag._value2member_map_.items()) if m.name is not None or _power_of_two(v) ] members = [] diff --git a/lib-python/3/functools.py b/lib-python/3/functools.py --- a/lib-python/3/functools.py +++ b/lib-python/3/functools.py @@ -421,7 +421,7 @@ def _make_key(args, kwds, typed, kwd_mark = (object(),), fasttypes = {int, str, frozenset, type(None)}, - sorted=sorted, tuple=tuple, type=type, len=len): + tuple=tuple, type=type, len=len): """Make a cache key from optionally typed positional and keyword arguments The key is constructed in a way that is flat as possible rather 
than @@ -434,14 +434,13 @@ """ key = args if kwds: - sorted_items = sorted(kwds.items()) key += kwd_mark - for item in sorted_items: + for item in kwds.items(): key += item if typed: key += tuple(type(v) for v in args) if kwds: - key += tuple(type(v) for k, v in sorted_items) + key += tuple(type(v) for v in kwds.values()) elif len(key) == 1 and type(key[0]) in fasttypes: return key[0] return _HashedSeq(key) @@ -493,6 +492,7 @@ hits = misses = 0 full = False cache_get = cache.get # bound method to lookup a key or return None + cache_len = cache.__len__ # get cache size without calling len() lock = RLock() # because linkedlist updates aren't threadsafe root = [] # root of the circular doubly linked list root[:] = [root, root, None, None] # initialize by pointing to self @@ -574,14 +574,16 @@ last = root[PREV] link = [last, root, key, result] last[NEXT] = root[PREV] = cache[key] = link - full = (len(cache) >= maxsize) + # Use the cache_len bound method instead of the len() function + # which could potentially be wrapped in an lru_cache itself. + full = (cache_len() >= maxsize) misses += 1 return result def cache_info(): """Report cache statistics""" with lock: - return _CacheInfo(hits, misses, maxsize, len(cache)) + return _CacheInfo(hits, misses, maxsize, cache_len()) def cache_clear(): """Clear the cache and cache statistics""" diff --git a/lib-python/3/getpass.py b/lib-python/3/getpass.py --- a/lib-python/3/getpass.py +++ b/lib-python/3/getpass.py @@ -7,7 +7,6 @@ echoing of the password contents while reading. On Windows, the msvcrt module will be used. -On the Mac EasyDialogs.AskPassword is used, if available. 
""" diff --git a/lib-python/3/idlelib/colorizer.py b/lib-python/3/idlelib/colorizer.py --- a/lib-python/3/idlelib/colorizer.py +++ b/lib-python/3/idlelib/colorizer.py @@ -21,7 +21,7 @@ # 1st 'file' colorized normal, 2nd as builtin, 3rd as string builtin = r"([^.'\"\\#]\b|^)" + any("BUILTIN", builtinlist) + r"\b" comment = any("COMMENT", [r"#[^\n]*"]) - stringprefix = r"(\br|u|ur|R|U|UR|Ur|uR|b|B|br|Br|bR|BR|rb|rB|Rb|RB)?" + stringprefix = r"(?i:\br|u|f|fr|rf|b|br|rb)?" sqstring = stringprefix + r"'[^'\\\n]*(\\.[^'\\\n]*)*'?" dqstring = stringprefix + r'"[^"\\\n]*(\\.[^"\\\n]*)*"?' sq3string = stringprefix + r"'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?" @@ -261,8 +261,15 @@ top = Toplevel(parent) top.title("Test ColorDelegator") x, y = map(int, parent.geometry().split('+')[1:]) - top.geometry("200x100+%d+%d" % (x + 250, y + 175)) - source = "if somename: x = 'abc' # comment\nprint\n" + top.geometry("700x250+%d+%d" % (x + 20, y + 175)) + source = ("# Following has syntax errors\n" + "if True: then int 1\nelif False: print 0\nelse: float(None)\n" + "if iF + If + IF: 'keywork matching must respect case'\n" + "# All valid prefixes for unicode and byte strings should be colored\n" + "'x', '''x''', \"x\", \"\"\"x\"\"\"\n" + "r'x', u'x', R'x', U'x', f'x', F'x', ur'is invalid'\n" + "fr'x', Fr'x', fR'x', FR'x', rf'x', rF'x', Rf'x', RF'x'\n" + "b'x',B'x', br'x',Br'x',bR'x',BR'x', rb'x'.rB'x',Rb'x',RB'x'\n") text = Text(top, background="white") text.pack(expand=1, fill="both") text.insert("insert", source) diff --git a/lib-python/3/idlelib/help.html b/lib-python/3/idlelib/help.html --- a/lib-python/3/idlelib/help.html +++ b/lib-python/3/idlelib/help.html @@ -90,7 +90,7 @@

    25.5. IDLE

    -

    Source code: Lib/idlelib/

    +

    Source code: Lib/idlelib/


    IDLE is Python’s Integrated Development and Learning Environment.

    IDLE has the following features:

    diff --git a/lib-python/3/imaplib.py b/lib-python/3/imaplib.py --- a/lib-python/3/imaplib.py +++ b/lib-python/3/imaplib.py @@ -419,7 +419,7 @@ self.literal = _Authenticator(authobject).process typ, dat = self._simple_command('AUTHENTICATE', mech) if typ != 'OK': - raise self.error(dat[-1]) + raise self.error(dat[-1].decode('utf-8', 'replace')) self.state = 'AUTH' return typ, dat diff --git a/lib-python/3/importlib/_bootstrap_external.py b/lib-python/3/importlib/_bootstrap_external.py --- a/lib-python/3/importlib/_bootstrap_external.py +++ b/lib-python/3/importlib/_bootstrap_external.py @@ -1440,6 +1440,4 @@ _setup(_bootstrap_module) supported_loaders = _get_supported_file_loaders() sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)]) - if _os.__name__ == 'nt': - sys.meta_path.append(WindowsRegistryFinder) sys.meta_path.append(PathFinder) diff --git a/lib-python/3/inspect.py b/lib-python/3/inspect.py --- a/lib-python/3/inspect.py +++ b/lib-python/3/inspect.py @@ -1416,7 +1416,6 @@ except OSError: lines = index = None else: - start = max(start, 1) start = max(0, min(start, len(lines) - context)) lines = lines[start:start+context] index = lineno - 1 - start diff --git a/lib-python/3/logging/__init__.py b/lib-python/3/logging/__init__.py --- a/lib-python/3/logging/__init__.py +++ b/lib-python/3/logging/__init__.py @@ -131,9 +131,14 @@ Otherwise, the string "Level %s" % level is returned. 
""" - # See Issues #22386 and #27937 for why it's this way - return (_levelToName.get(level) or _nameToLevel.get(level) or - "Level %s" % level) + # See Issues #22386, #27937 and #29220 for why it's this way + result = _levelToName.get(level) + if result is not None: + return result + result = _nameToLevel.get(level) + if result is not None: + return result + return "Level %s" % level def addLevelName(level, levelName): """ diff --git a/lib-python/3/mailbox.py b/lib-python/3/mailbox.py --- a/lib-python/3/mailbox.py +++ b/lib-python/3/mailbox.py @@ -313,11 +313,12 @@ # final position in order to prevent race conditions with changes # from other programs try: - if hasattr(os, 'link'): + try: os.link(tmp_file.name, dest) + except (AttributeError, PermissionError): + os.rename(tmp_file.name, dest) + else: os.remove(tmp_file.name) - else: - os.rename(tmp_file.name, dest) except OSError as e: os.remove(tmp_file.name) if e.errno == errno.EEXIST: @@ -1200,13 +1201,14 @@ for key in self.iterkeys(): if key - 1 != prev: changes.append((key, prev + 1)) - if hasattr(os, 'link'): + try: os.link(os.path.join(self._path, str(key)), os.path.join(self._path, str(prev + 1))) - os.unlink(os.path.join(self._path, str(key))) - else: + except (AttributeError, PermissionError): os.rename(os.path.join(self._path, str(key)), os.path.join(self._path, str(prev + 1))) + else: + os.unlink(os.path.join(self._path, str(key))) prev += 1 self._next_key = prev + 1 if len(changes) == 0: @@ -2076,13 +2078,14 @@ else: raise try: - if hasattr(os, 'link'): + try: os.link(pre_lock.name, f.name + '.lock') dotlock_done = True - os.unlink(pre_lock.name) - else: + except (AttributeError, PermissionError): os.rename(pre_lock.name, f.name + '.lock') dotlock_done = True + else: + os.unlink(pre_lock.name) except FileExistsError: os.remove(pre_lock.name) raise ExternalClashError('dot lock unavailable: %s' % diff --git a/lib-python/3/multiprocessing/context.py b/lib-python/3/multiprocessing/context.py --- 
a/lib-python/3/multiprocessing/context.py +++ b/lib-python/3/multiprocessing/context.py @@ -196,7 +196,7 @@ def get_start_method(self, allow_none=False): return self._name - def set_start_method(self, method=None): + def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property diff --git a/lib-python/3/multiprocessing/spawn.py b/lib-python/3/multiprocessing/spawn.py --- a/lib-python/3/multiprocessing/spawn.py +++ b/lib-python/3/multiprocessing/spawn.py @@ -217,7 +217,7 @@ process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: - set_start_method(data['start_method']) + set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) diff --git a/lib-python/3/pathlib.py b/lib-python/3/pathlib.py --- a/lib-python/3/pathlib.py +++ b/lib-python/3/pathlib.py @@ -192,7 +192,9 @@ s = self._ext_to_normal(_getfinalpathname(s)) except FileNotFoundError: previous_s = s - s = os.path.abspath(os.path.join(s, os.pardir)) + s = os.path.dirname(s) + if previous_s == s: + return path else: if previous_s is None: return s @@ -1233,7 +1235,7 @@ if not exist_ok or not self.is_dir(): raise except OSError as e: - if e.errno != ENOENT: + if e.errno != ENOENT or self.parent == self: raise self.parent.mkdir(parents=True) self._accessor.mkdir(self, mode) diff --git a/lib-python/3/platform.py b/lib-python/3/platform.py --- a/lib-python/3/platform.py +++ b/lib-python/3/platform.py @@ -110,7 +110,7 @@ """ -__version__ = '1.0.7' +__version__ = '1.0.8' import collections import sys, os, re, subprocess @@ -1198,7 +1198,9 @@ elif buildtime: builddate = builddate + ' ' + buildtime - if hasattr(sys, '_mercurial'): + if hasattr(sys, '_git'): + _, branch, revision = sys._git + elif hasattr(sys, '_mercurial'): _, branch, revision = sys._mercurial elif hasattr(sys, 'subversion'): # sys.subversion was added in Python 2.5 diff --git a/lib-python/3/pstats.py 
b/lib-python/3/pstats.py --- a/lib-python/3/pstats.py +++ b/lib-python/3/pstats.py @@ -48,11 +48,14 @@ printed. The sort_stats() method now processes some additional options (i.e., in - addition to the old -1, 0, 1, or 2). It takes an arbitrary number of - quoted strings to select the sort order. For example sort_stats('time', - 'name') sorts on the major key of 'internal function time', and on the - minor key of 'the name of the function'. Look at the two tables in - sort_stats() and get_sort_arg_defs(self) for more examples. + addition to the old -1, 0, 1, or 2 that are respectively interpreted as + 'stdname', 'calls', 'time', and 'cumulative'). It takes an arbitrary number + of quoted strings to select the sort order. + + For example sort_stats('time', 'name') sorts on the major key of 'internal + function time', and on the minor key of 'the name of the function'. Look at + the two tables in sort_stats() and get_sort_arg_defs(self) for more + examples. All methods return self, so you can string together commands like: Stats('foo', 'goo').strip_dirs().sort_stats('calls').\ diff --git a/lib-python/3/pydoc_data/topics.py b/lib-python/3/pydoc_data/topics.py --- a/lib-python/3/pydoc_data/topics.py +++ b/lib-python/3/pydoc_data/topics.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Fri Dec 16 16:33:16 2016 -topics = {'assert': '\n' - 'The "assert" statement\n' +# Autogenerated by Sphinx on Sat Mar 4 12:14:44 2017 +topics = {'assert': 'The "assert" statement\n' '**********************\n' '\n' 'Assert statements are a convenient way to insert debugging ' @@ -39,8 +38,7 @@ 'Assignments to "__debug__" are illegal. 
The value for the ' 'built-in\n' 'variable is determined when the interpreter starts.\n', - 'assignment': '\n' - 'Assignment statements\n' + 'assignment': 'Assignment statements\n' '*********************\n' '\n' 'Assignment statements are used to (re)bind names to values and ' @@ -405,8 +403,7 @@ 'See also: **PEP 526** - Variable and attribute annotation ' 'syntax\n' ' **PEP 484** - Type hints\n', - 'atom-identifiers': '\n' - 'Identifiers (Names)\n' + 'atom-identifiers': 'Identifiers (Names)\n' '*******************\n' '\n' 'An identifier occurring as an atom is a name. See ' @@ -446,8 +443,7 @@ 'happen. If the class name consists only of underscores, ' 'no\n' 'transformation is done.\n', - 'atom-literals': '\n' - 'Literals\n' + 'atom-literals': 'Literals\n' '********\n' '\n' 'Python supports string and bytes literals and various ' @@ -476,8 +472,7 @@ 'may obtain\n' 'the same object or a different object with the same ' 'value.\n', - 'attribute-access': '\n' - 'Customizing attribute access\n' + 'attribute-access': 'Customizing attribute access\n' '****************************\n' '\n' 'The following methods can be defined to customize the ' @@ -851,8 +846,7 @@ '* *__class__* assignment works only if both classes have ' 'the same\n' ' *__slots__*.\n', - 'attribute-references': '\n' - 'Attribute references\n' + 'attribute-references': 'Attribute references\n' '********************\n' '\n' 'An attribute reference is a primary followed by a ' @@ -875,8 +869,7 @@ 'determined by the object. 
Multiple evaluations of ' 'the same attribute\n' 'reference may yield different objects.\n', - 'augassign': '\n' - 'Augmented assignment statements\n' + 'augassign': 'Augmented assignment statements\n' '*******************************\n' '\n' 'Augmented assignment is the combination, in a single statement, ' @@ -940,8 +933,7 @@ 'about\n' 'class and instance attributes applies as for regular ' 'assignments.\n', - 'binary': '\n' - 'Binary arithmetic operations\n' + 'binary': 'Binary arithmetic operations\n' '****************************\n' '\n' 'The binary arithmetic operations have the conventional priority\n' @@ -1029,8 +1021,7 @@ 'The "-" (subtraction) operator yields the difference of its ' 'arguments.\n' 'The numeric arguments are first converted to a common type.\n', - 'bitwise': '\n' - 'Binary bitwise operations\n' + 'bitwise': 'Binary bitwise operations\n' '*************************\n' '\n' 'Each of the three bitwise operations has a different priority ' @@ -1050,8 +1041,7 @@ 'The "|" operator yields the bitwise (inclusive) OR of its ' 'arguments,\n' 'which must be integers.\n', - 'bltin-code-objects': '\n' - 'Code Objects\n' + 'bltin-code-objects': 'Code Objects\n' '************\n' '\n' 'Code objects are used by the implementation to ' @@ -1074,8 +1064,7 @@ '\n' 'See The standard type hierarchy for more ' 'information.\n', - 'bltin-ellipsis-object': '\n' - 'The Ellipsis Object\n' + 'bltin-ellipsis-object': 'The Ellipsis Object\n' '*******************\n' '\n' 'This object is commonly used by slicing (see ' @@ -1087,8 +1076,7 @@ '"Ellipsis" singleton.\n' '\n' 'It is written as "Ellipsis" or "...".\n', - 'bltin-null-object': '\n' - 'The Null Object\n' + 'bltin-null-object': 'The Null Object\n' '***************\n' '\n' "This object is returned by functions that don't " @@ -1100,8 +1088,7 @@ 'same singleton.\n' '\n' 'It is written as "None".\n', - 'bltin-type-objects': '\n' - 'Type Objects\n' + 'bltin-type-objects': 'Type Objects\n' '************\n' '\n' 'Type 
objects represent the various object types. An ' @@ -1113,8 +1100,7 @@ 'all standard built-in types.\n' '\n' 'Types are written like this: "".\n', - 'booleans': '\n' - 'Boolean operations\n' + 'booleans': 'Boolean operations\n' '******************\n' '\n' ' or_test ::= and_test | or_test "or" and_test\n' @@ -1163,8 +1149,7 @@ 'its\n' 'argument (for example, "not \'foo\'" produces "False" rather ' 'than "\'\'".)\n', - 'break': '\n' - 'The "break" statement\n' + 'break': 'The "break" statement\n' '*********************\n' '\n' ' break_stmt ::= "break"\n' @@ -1185,8 +1170,7 @@ 'clause, that "finally" clause is executed before really leaving ' 'the\n' 'loop.\n', - 'callable-types': '\n' - 'Emulating callable objects\n' + 'callable-types': 'Emulating callable objects\n' '**************************\n' '\n' 'object.__call__(self[, args...])\n' @@ -1195,8 +1179,7 @@ 'this method\n' ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n' ' "x.__call__(arg1, arg2, ...)".\n', - 'calls': '\n' - 'Calls\n' + 'calls': 'Calls\n' '*****\n' '\n' 'A call calls a callable object (e.g., a *function*) with a ' @@ -1217,7 +1200,8 @@ ' ("," "*" expression | "," ' 'keyword_item)*\n' ' keywords_arguments ::= (keyword_item | "**" expression)\n' - ' ("," keyword_item | "**" expression)*\n' + ' ("," keyword_item | "," "**" ' + 'expression)*\n' ' keyword_item ::= identifier "=" expression\n' '\n' 'An optional trailing comma may be present after the positional and\n' @@ -1382,8 +1366,7 @@ ' The class must define a "__call__()" method; the effect is then ' 'the\n' ' same as if that method was called.\n', - 'class': '\n' - 'Class definitions\n' + 'class': 'Class definitions\n' '*****************\n' '\n' 'A class definition defines a class object (see section The ' @@ -1469,8 +1452,7 @@ '\n' 'See also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n' ' Class Decorators\n', - 'comparisons': '\n' - 'Comparisons\n' + 'comparisons': 'Comparisons\n' '***********\n' '\n' 'Unlike C, all 
comparison operations in Python have the same ' @@ -1623,7 +1605,7 @@ 'restriction that\n' ' ranges do not support order comparison. Equality ' 'comparison across\n' - ' these types results in unequality, and ordering comparison ' + ' these types results in inequality, and ordering comparison ' 'across\n' ' these types raises "TypeError".\n' '\n' @@ -1762,6 +1744,12 @@ ' to sequences, but not to sets or mappings). See also the\n' ' "total_ordering()" decorator.\n' '\n' + '* The "hash()" result should be consistent with equality. ' + 'Objects\n' + ' that are equal should either have the same hash value, or ' + 'be marked\n' + ' as unhashable.\n' + '\n' 'Python does not enforce these consistency rules. In fact, ' 'the\n' 'not-a-number values are an example for not following these ' @@ -1833,8 +1821,7 @@ 'is determined using the "id()" function. "x is not y" yields ' 'the\n' 'inverse truth value. [4]\n', - 'compound': '\n' - 'Compound statements\n' + 'compound': 'Compound statements\n' '*******************\n' '\n' 'Compound statements contain (groups of) other statements; they ' @@ -2725,8 +2712,7 @@ ' body is transformed into the namespace\'s "__doc__" item ' 'and\n' " therefore the class's *docstring*.\n", - 'context-managers': '\n' - 'With Statement Context Managers\n' + 'context-managers': 'With Statement Context Managers\n' '*******************************\n' '\n' 'A *context manager* is an object that defines the ' @@ -2788,8 +2774,7 @@ ' The specification, background, and examples for the ' 'Python "with"\n' ' statement.\n', - 'continue': '\n' - 'The "continue" statement\n' + 'continue': 'The "continue" statement\n' '************************\n' '\n' ' continue_stmt ::= "continue"\n' @@ -2806,8 +2791,7 @@ '"finally" clause, that "finally" clause is executed before ' 'really\n' 'starting the next loop cycle.\n', - 'conversions': '\n' - 'Arithmetic conversions\n' + 'conversions': 'Arithmetic conversions\n' '**********************\n' '\n' 'When a description of an 
arithmetic operator below uses the ' @@ -2833,8 +2817,7 @@ "left argument to the '%' operator). Extensions must define " 'their own\n' 'conversion behavior.\n', - 'customization': '\n' - 'Basic customization\n' + 'customization': 'Basic customization\n' '*******************\n' '\n' 'object.__new__(cls[, ...])\n' @@ -3153,15 +3136,18 @@ 'on members\n' ' of hashed collections including "set", "frozenset", and ' '"dict".\n' - ' "__hash__()" should return an integer. The only ' - 'required property\n' + ' "__hash__()" should return an integer. The only required ' + 'property\n' ' is that objects which compare equal have the same hash ' 'value; it is\n' - ' advised to somehow mix together (e.g. using exclusive ' - 'or) the hash\n' - ' values for the components of the object that also play a ' - 'part in\n' - ' comparison of objects.\n' + ' advised to mix together the hash values of the ' + 'components of the\n' + ' object that also play a part in comparison of objects by ' + 'packing\n' + ' them into a tuple and hashing the tuple. 
Example:\n' + '\n' + ' def __hash__(self):\n' + ' return hash((self.name, self.nick, self.color))\n' '\n' ' Note: "hash()" truncates the value returned from an ' "object's\n" @@ -3273,8 +3259,7 @@ ' neither "__len__()" nor "__bool__()", all its instances ' 'are\n' ' considered true.\n', - 'debugger': '\n' - '"pdb" --- The Python Debugger\n' + 'debugger': '"pdb" --- The Python Debugger\n' '*****************************\n' '\n' '**Source code:** Lib/pdb.py\n' @@ -3939,8 +3924,7 @@ '[1] Whether a frame is considered to originate in a certain ' 'module\n' ' is determined by the "__name__" in the frame globals.\n', - 'del': '\n' - 'The "del" statement\n' + 'del': 'The "del" statement\n' '*******************\n' '\n' ' del_stmt ::= "del" target_list\n' @@ -3969,8 +3953,7 @@ 'Changed in version 3.2: Previously it was illegal to delete a name\n' 'from the local namespace if it occurs as a free variable in a nested\n' 'block.\n', - 'dict': '\n' - 'Dictionary displays\n' + 'dict': 'Dictionary displays\n' '*******************\n' '\n' 'A dictionary display is a possibly empty series of key/datum pairs\n' @@ -4014,8 +3997,7 @@ 'should be *hashable*, which excludes all mutable objects.) Clashes\n' 'between duplicate keys are not detected; the last datum (textually\n' 'rightmost in the display) stored for a given key value prevails.\n', - 'dynamic-features': '\n' - 'Interaction with dynamic features\n' + 'dynamic-features': 'Interaction with dynamic features\n' '*********************************\n' '\n' 'Name resolution of free variables occurs at runtime, not ' @@ -4051,8 +4033,7 @@ 'override the global and local namespace. 
If only one ' 'namespace is\n' 'specified, it is used for both.\n', - 'else': '\n' - 'The "if" statement\n' + 'else': 'The "if" statement\n' '******************\n' '\n' 'The "if" statement is used for conditional execution:\n' @@ -4069,8 +4050,7 @@ '(and no other part of the "if" statement is executed or evaluated).\n' 'If all expressions are false, the suite of the "else" clause, if\n' 'present, is executed.\n', - 'exceptions': '\n' - 'Exceptions\n' + 'exceptions': 'Exceptions\n' '**********\n' '\n' 'Exceptions are a means of breaking out of the normal flow of ' @@ -4146,8 +4126,7 @@ ' these operations is not available at the time the module ' 'is\n' ' compiled.\n', - 'execmodel': '\n' - 'Execution model\n' + 'execmodel': 'Execution model\n' '***************\n' '\n' '\n' @@ -4478,8 +4457,7 @@ ' these operations is not available at the time the module ' 'is\n' ' compiled.\n', - 'exprlists': '\n' - 'Expression lists\n' + 'exprlists': 'Expression lists\n' '****************\n' '\n' ' expression_list ::= expression ( "," expression )* [","]\n' @@ -4516,8 +4494,7 @@ 'value of that expression. 
(To create an empty tuple, use an ' 'empty pair\n' 'of parentheses: "()".)\n', - 'floating': '\n' - 'Floating point literals\n' + 'floating': 'Floating point literals\n' '***********************\n' '\n' 'Floating point literals are described by the following lexical\n' @@ -4553,8 +4530,7 @@ 'Changed in version 3.6: Underscores are now allowed for ' 'grouping\n' 'purposes in literals.\n', - 'for': '\n' - 'The "for" statement\n' + 'for': 'The "for" statement\n' '*******************\n' '\n' 'The "for" statement is used to iterate over the elements of a ' @@ -4626,8 +4602,7 @@ '\n' ' for x in a[:]:\n' ' if x < 0: a.remove(x)\n', - 'formatstrings': '\n' - 'Format String Syntax\n' + 'formatstrings': 'Format String Syntax\n' '********************\n' '\n' 'The "str.format()" method and the "Formatter" class share ' @@ -5346,8 +5321,7 @@ ' 9 9 11 1001\n' ' 10 A 12 1010\n' ' 11 B 13 1011\n', - 'function': '\n' - 'Function definitions\n' + 'function': 'Function definitions\n' '********************\n' '\n' 'A function definition defines a user-defined function object ' @@ -5516,8 +5490,7 @@ '\n' ' **PEP 3107** - Function Annotations\n' ' The original specification for function annotations.\n', - 'global': '\n' - 'The "global" statement\n' + 'global': 'The "global" statement\n' '**********************\n' '\n' ' global_stmt ::= "global" identifier ("," identifier)*\n' @@ -5561,8 +5534,7 @@ 'code containing the function call. The same applies to the ' '"eval()"\n' 'and "compile()" functions.\n', - 'id-classes': '\n' - 'Reserved classes of identifiers\n' + 'id-classes': 'Reserved classes of identifiers\n' '*******************************\n' '\n' 'Certain classes of identifiers (besides keywords) have ' @@ -5610,8 +5582,7 @@ ' to help avoid name clashes between "private" attributes of ' 'base and\n' ' derived classes. 
See section Identifiers (Names).\n', - 'identifiers': '\n' - 'Identifiers and keywords\n' + 'identifiers': 'Identifiers and keywords\n' '************************\n' '\n' 'Identifiers (also referred to as *names*) are described by ' @@ -5759,8 +5730,7 @@ ' to help avoid name clashes between "private" attributes of ' 'base and\n' ' derived classes. See section Identifiers (Names).\n', - 'if': '\n' - 'The "if" statement\n' + 'if': 'The "if" statement\n' '******************\n' '\n' 'The "if" statement is used for conditional execution:\n' @@ -5776,8 +5746,7 @@ '(and no other part of the "if" statement is executed or evaluated).\n' 'If all expressions are false, the suite of the "else" clause, if\n' 'present, is executed.\n', - 'imaginary': '\n' - 'Imaginary literals\n' + 'imaginary': 'Imaginary literals\n' '******************\n' '\n' 'Imaginary literals are described by the following lexical ' @@ -5797,8 +5766,7 @@ '\n' ' 3.14j 10.j 10j .001j 1e100j 3.14e-10j ' '3.14_15_93j\n', - 'import': '\n' - 'The "import" statement\n' + 'import': 'The "import" statement\n' '**********************\n' '\n' ' import_stmt ::= "import" module ["as" name] ( "," module ' @@ -6059,8 +6027,7 @@ '\n' ' **PEP 236** - Back to the __future__\n' ' The original proposal for the __future__ mechanism.\n', - 'in': '\n' - 'Membership test operations\n' + 'in': 'Membership test operations\n' '**************************\n' '\n' 'The operators "in" and "not in" test for membership. 
"x in s"\n' @@ -6095,8 +6062,7 @@ '\n' 'The operator "not in" is defined to have the inverse true value of\n' '"in".\n', - 'integers': '\n' - 'Integer literals\n' + 'integers': 'Integer literals\n' '****************\n' '\n' 'Integer literals are described by the following lexical ' @@ -6142,8 +6108,7 @@ 'Changed in version 3.6: Underscores are now allowed for ' 'grouping\n' 'purposes in literals.\n', - 'lambda': '\n' - 'Lambdas\n' + 'lambda': 'Lambdas\n' '*******\n' '\n' ' lambda_expr ::= "lambda" [parameter_list]: expression\n' @@ -6166,8 +6131,7 @@ 'Note that functions created with lambda expressions cannot ' 'contain\n' 'statements or annotations.\n', - 'lists': '\n' - 'List displays\n' + 'lists': 'List displays\n' '*************\n' '\n' 'A list display is a possibly empty series of expressions enclosed ' @@ -6184,8 +6148,7 @@ 'from left to right and placed into the list object in that order.\n' 'When a comprehension is supplied, the list is constructed from the\n' 'elements resulting from the comprehension.\n', - 'naming': '\n' - 'Naming and binding\n' + 'naming': 'Naming and binding\n' '******************\n' '\n' '\n' @@ -6398,8 +6361,7 @@ 'override the global and local namespace. 
If only one namespace ' 'is\n' 'specified, it is used for both.\n', - 'nonlocal': '\n' - 'The "nonlocal" statement\n' + 'nonlocal': 'The "nonlocal" statement\n' '************************\n' '\n' ' nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n' @@ -6430,8 +6392,7 @@ '\n' ' **PEP 3104** - Access to Names in Outer Scopes\n' ' The specification for the "nonlocal" statement.\n', - 'numbers': '\n' - 'Numeric literals\n' + 'numbers': 'Numeric literals\n' '****************\n' '\n' 'There are three types of numeric literals: integers, floating ' @@ -6445,8 +6406,7 @@ 'is actually an expression composed of the unary operator \'"-"\' ' 'and the\n' 'literal "1".\n', - 'numeric-types': '\n' - 'Emulating numeric types\n' + 'numeric-types': 'Emulating numeric types\n' '***********************\n' '\n' 'The following methods can be defined to emulate numeric ' @@ -6622,8 +6582,7 @@ ' "__index__()" is defined "__int__()" should also be ' 'defined, and\n' ' both should return the same value.\n', - 'objects': '\n' - 'Objects, values and types\n' + 'objects': 'Objects, values and types\n' '*************************\n' '\n' "*Objects* are Python's abstraction for data. All data in a " @@ -6751,8 +6710,7 @@ 'created empty lists. 
(Note that "c = d = []" assigns the same ' 'object\n' 'to both "c" and "d".)\n', - 'operator-summary': '\n' - 'Operator precedence\n' + 'operator-summary': 'Operator precedence\n' '*******************\n' '\n' 'The following table summarizes the operator precedence ' @@ -6925,8 +6883,7 @@ 'arithmetic\n' ' or bitwise unary operator on its right, that is, ' '"2**-1" is "0.5".\n', - 'pass': '\n' - 'The "pass" statement\n' + 'pass': 'The "pass" statement\n' '********************\n' '\n' ' pass_stmt ::= "pass"\n' @@ -6939,8 +6896,7 @@ ' def f(arg): pass # a function that does nothing (yet)\n' '\n' ' class C: pass # a class with no methods (yet)\n', - 'power': '\n' - 'The power operator\n' + 'power': 'The power operator\n' '******************\n' '\n' 'The power operator binds more tightly than unary operators on its\n' @@ -6974,8 +6930,7 @@ 'Raising a negative number to a fractional power results in a ' '"complex"\n' 'number. (In earlier versions it raised a "ValueError".)\n', - 'raise': '\n' - 'The "raise" statement\n' + 'raise': 'The "raise" statement\n' '*********************\n' '\n' ' raise_stmt ::= "raise" [expression ["from" expression]]\n' @@ -7060,8 +7015,7 @@ 'Exceptions, and information about handling exceptions is in ' 'section\n' 'The try statement.\n', - 'return': '\n' - 'The "return" statement\n' + 'return': 'The "return" statement\n' '**********************\n' '\n' ' return_stmt ::= "return" [expression_list]\n' @@ -7096,8 +7050,7 @@ '"StopAsyncIteration" to be raised. 
A non-empty "return" statement ' 'is\n' 'a syntax error in an asynchronous generator function.\n', - 'sequence-types': '\n' - 'Emulating container types\n' + 'sequence-types': 'Emulating container types\n' '*************************\n' '\n' 'The following methods can be defined to implement ' @@ -7318,8 +7271,7 @@ ' iteration protocol via "__getitem__()", see this ' 'section in the\n' ' language reference.\n', - 'shifting': '\n' - 'Shifting operations\n' + 'shifting': 'Shifting operations\n' '*******************\n' '\n' 'The shifting operations have lower priority than the arithmetic\n' @@ -7343,8 +7295,7 @@ 'operand is\n' ' larger than "sys.maxsize" an "OverflowError" exception is ' 'raised.\n', - 'slicings': '\n' - 'Slicings\n' + 'slicings': 'Slicings\n' '********\n' '\n' 'A slicing selects a range of items in a sequence object (e.g., ' @@ -7395,8 +7346,7 @@ 'as lower bound, upper bound and stride, respectively, ' 'substituting\n' '"None" for missing expressions.\n', - 'specialattrs': '\n' - 'Special Attributes\n' + 'specialattrs': 'Special Attributes\n' '******************\n' '\n' 'The implementation adds a few special read-only attributes ' @@ -7481,8 +7431,7 @@ '[5] To format only a tuple you should therefore provide a\n' ' singleton tuple whose only element is the tuple to be ' 'formatted.\n', - 'specialnames': '\n' - 'Special method names\n' + 'specialnames': 'Special method names\n' '********************\n' '\n' 'A class can implement certain operations that are invoked by ' @@ -7843,15 +7792,18 @@ 'on members\n' ' of hashed collections including "set", "frozenset", and ' '"dict".\n' - ' "__hash__()" should return an integer. The only required ' + ' "__hash__()" should return an integer. The only required ' 'property\n' ' is that objects which compare equal have the same hash ' 'value; it is\n' - ' advised to somehow mix together (e.g. 
using exclusive or) ' - 'the hash\n' - ' values for the components of the object that also play a ' - 'part in\n' - ' comparison of objects.\n' + ' advised to mix together the hash values of the components ' + 'of the\n' + ' object that also play a part in comparison of objects by ' + 'packing\n' + ' them into a tuple and hashing the tuple. Example:\n' + '\n' + ' def __hash__(self):\n' + ' return hash((self.name, self.nick, self.color))\n' '\n' ' Note: "hash()" truncates the value returned from an ' "object's\n" @@ -9270,8 +9222,7 @@ 'special method *must* be set on the class object itself in ' 'order to be\n' 'consistently invoked by the interpreter).\n', - 'string-methods': '\n' - 'String Methods\n' + 'string-methods': 'String Methods\n' '**************\n' '\n' 'Strings implement all of the common sequence operations, ' @@ -9508,12 +9459,11 @@ 'characters\n' ' and there is at least one character, false otherwise. ' 'Decimal\n' - ' characters are those from general category "Nd". This ' - 'category\n' - ' includes digit characters, and all characters that can ' - 'be used to\n' - ' form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC ' - 'DIGIT ZERO.\n' + ' characters are those that can be used to form numbers ' + 'in base 10,\n' + ' e.g. U+0660, ARABIC-INDIC DIGIT ZERO. Formally a ' + 'decimal character\n' + ' is a character in the Unicode General Category "Nd".\n' '\n' 'str.isdigit()\n' '\n' @@ -9523,10 +9473,13 @@ 'include decimal\n' ' characters and digits that need special handling, such ' 'as the\n' - ' compatibility superscript digits. Formally, a digit is ' - 'a character\n' - ' that has the property value Numeric_Type=Digit or\n' - ' Numeric_Type=Decimal.\n' + ' compatibility superscript digits. 
This covers digits ' + 'which cannot\n' + ' be used to form numbers in base 10, like the Kharosthi ' + 'numbers.\n' + ' Formally, a digit is a character that has the property ' + 'value\n' + ' Numeric_Type=Digit or Numeric_Type=Decimal.\n' '\n' 'str.isidentifier()\n' '\n' @@ -10072,8 +10025,7 @@ " '00042'\n" ' >>> "-42".zfill(5)\n' " '-0042'\n", - 'strings': '\n' - 'String and Bytes literals\n' + 'strings': 'String and Bytes literals\n' '*************************\n' '\n' 'String literals are described by the following lexical ' @@ -10307,8 +10259,7 @@ 'followed by a newline is interpreted as those two characters as ' 'part\n' 'of the literal, *not* as a line continuation.\n', - 'subscriptions': '\n' - 'Subscriptions\n' + 'subscriptions': 'Subscriptions\n' '*************\n' '\n' 'A subscription selects an item of a sequence (string, tuple ' @@ -10365,8 +10316,7 @@ "A string's items are characters. A character is not a " 'separate data\n' 'type but a string of exactly one character.\n', - 'truth': '\n' - 'Truth Value Testing\n' + 'truth': 'Truth Value Testing\n' '*******************\n' '\n' 'Any object can be tested for truth value, for use in an "if" or\n' @@ -10398,8 +10348,7 @@ 'otherwise stated. (Important exception: the Boolean operations ' '"or"\n' 'and "and" always return one of their operands.)\n', - 'try': '\n' - 'The "try" statement\n' + 'try': 'The "try" statement\n' '*******************\n' '\n' 'The "try" statement specifies exception handlers and/or cleanup code\n' @@ -10546,8 +10495,7 @@ 'Exceptions, and information on using the "raise" statement to ' 'generate\n' 'exceptions may be found in section The raise statement.\n', - 'types': '\n' - 'The standard type hierarchy\n' + 'types': 'The standard type hierarchy\n' '***************************\n' '\n' 'Below is a list of the types that are built into Python. 
' @@ -11262,14 +11210,14 @@ 'the\n' ' dictionary containing the class\'s namespace; "__bases__" is a ' 'tuple\n' - ' (possibly empty or a singleton) containing the base classes, in ' - 'the\n' - ' order of their occurrence in the base class list; "__doc__" is ' - 'the\n' - ' class\'s documentation string, or "None" if undefined;\n' - ' "__annotations__" (optional) is a dictionary containing ' - '*variable\n' - ' annotations* collected during class body execution.\n' + ' containing the base classes, in the order of their occurrence ' + 'in\n' + ' the base class list; "__doc__" is the class\'s documentation ' + 'string,\n' + ' or "None" if undefined; "__annotations__" (optional) is a\n' + ' dictionary containing *variable annotations* collected during ' + 'class\n' + ' body execution.\n' '\n' 'Class instances\n' ' A class instance is created by calling a class object (see ' @@ -11549,8 +11497,7 @@ ' under "User-defined methods". Class method objects are ' 'created\n' ' by the built-in "classmethod()" constructor.\n', - 'typesfunctions': '\n' - 'Functions\n' + 'typesfunctions': 'Functions\n' '*********\n' '\n' 'Function objects are created by function definitions. 
The ' @@ -11567,8 +11514,7 @@ 'different object types.\n' '\n' 'See Function definitions for more information.\n', - 'typesmapping': '\n' - 'Mapping Types --- "dict"\n' + 'typesmapping': 'Mapping Types --- "dict"\n' '************************\n' '\n' 'A *mapping* object maps *hashable* values to arbitrary ' @@ -11925,8 +11871,7 @@ " {'bacon'}\n" " >>> keys ^ {'sausage', 'juice'}\n" " {'juice', 'sausage', 'bacon', 'spam'}\n", - 'typesmethods': '\n' - 'Methods\n' + 'typesmethods': 'Methods\n' '*******\n' '\n' 'Methods are functions that are called using the attribute ' @@ -11983,8 +11928,7 @@ " 'my name is method'\n" '\n' 'See The standard type hierarchy for more information.\n', - 'typesmodules': '\n' - 'Modules\n' + 'typesmodules': 'Modules\n' '*******\n' '\n' 'The only special operation on a module is attribute access: ' @@ -12021,8 +11965,7 @@ 'written as\n' '"".\n', - 'typesseq': '\n' - 'Sequence Types --- "list", "tuple", "range"\n' + 'typesseq': 'Sequence Types --- "list", "tuple", "range"\n' '*******************************************\n' '\n' 'There are three basic sequence types: lists, tuples, and range\n' @@ -12170,9 +12113,9 @@ '\n' '3. If *i* or *j* is negative, the index is relative to the end ' 'of\n' - ' the string: "len(s) + i" or "len(s) + j" is substituted. But ' - 'note\n' - ' that "-0" is still "0".\n' + ' sequence *s*: "len(s) + i" or "len(s) + j" is substituted. ' + 'But\n' + ' note that "-0" is still "0".\n' '\n' '4. The slice of *s* from *i* to *j* is defined as the sequence ' 'of\n' @@ -12191,12 +12134,17 @@ ' (j-i)/k". In other words, the indices are "i", "i+k", ' '"i+2*k",\n' ' "i+3*k" and so on, stopping when *j* is reached (but never\n' - ' including *j*). If *i* or *j* is greater than "len(s)", use\n' - ' "len(s)". If *i* or *j* are omitted or "None", they become ' - '"end"\n' - ' values (which end depends on the sign of *k*). Note, *k* ' - 'cannot be\n' - ' zero. If *k* is "None", it is treated like "1".\n' + ' including *j*). 
When *k* is positive, *i* and *j* are ' + 'reduced to\n' + ' "len(s)" if they are greater. When *k* is negative, *i* and ' + '*j* are\n' + ' reduced to "len(s) - 1" if they are greater. If *i* or *j* ' + 'are\n' + ' omitted or "None", they become "end" values (which end ' + 'depends on\n' + ' the sign of *k*). Note, *k* cannot be zero. If *k* is ' + '"None", it\n' + ' is treated like "1".\n' '\n' '6. Concatenating immutable sequences always results in a new\n' ' object. This means that building up a sequence by repeated\n' @@ -12714,8 +12662,7 @@ ' * The linspace recipe shows how to implement a lazy version ' 'of\n' ' range that suitable for floating point applications.\n', - 'typesseq-mutable': '\n' - 'Mutable Sequence Types\n' + 'typesseq-mutable': 'Mutable Sequence Types\n' '**********************\n' '\n' 'The operations in the following table are defined on ' @@ -12855,8 +12802,7 @@ 'referenced multiple\n' ' times, as explained for "s * n" under Common Sequence ' 'Operations.\n', - 'unary': '\n' - 'Unary arithmetic and bitwise operations\n' + 'unary': 'Unary arithmetic and bitwise operations\n' '***************************************\n' '\n' 'All unary arithmetic and bitwise operations have the same ' @@ -12878,8 +12824,7 @@ 'In all three cases, if the argument does not have the proper type, ' 'a\n' '"TypeError" exception is raised.\n', - 'while': '\n' - 'The "while" statement\n' + 'while': 'The "while" statement\n' '*********************\n' '\n' 'The "while" statement is used for repeated execution as long as an\n' @@ -12903,8 +12848,7 @@ 'executed in the first suite skips the rest of the suite and goes ' 'back\n' 'to testing the expression.\n', - 'with': '\n' - 'The "with" statement\n' + 'with': 'The "with" statement\n' '********************\n' '\n' 'The "with" statement is used to wrap the execution of a block with\n' @@ -12977,8 +12921,7 @@ ' The specification, background, and examples for the Python ' '"with"\n' ' statement.\n', - 'yield': '\n' - 'The 
"yield" statement\n' + 'yield': 'The "yield" statement\n' '*********************\n' '\n' ' yield_stmt ::= yield_expression\n' diff --git a/lib-python/3/random.py b/lib-python/3/random.py --- a/lib-python/3/random.py +++ b/lib-python/3/random.py @@ -254,7 +254,7 @@ try: i = self._randbelow(len(seq)) except ValueError: - raise IndexError('Cannot choose from an empty sequence') + raise IndexError('Cannot choose from an empty sequence') from None return seq[i] def shuffle(self, x, random=None): diff --git a/lib-python/3/secrets.py b/lib-python/3/secrets.py --- a/lib-python/3/secrets.py +++ b/lib-python/3/secrets.py @@ -26,6 +26,8 @@ def randbelow(exclusive_upper_bound): """Return a random int in the range [0, n).""" + if exclusive_upper_bound <= 0: + raise ValueError("Upper bound must be positive.") return _sysrand._randbelow(exclusive_upper_bound) DEFAULT_ENTROPY = 32 # number of bytes to return by default diff --git a/lib-python/3/shlex.py b/lib-python/3/shlex.py --- a/lib-python/3/shlex.py +++ b/lib-python/3/shlex.py @@ -232,11 +232,6 @@ break # emit current token else: continue - elif self.posix and nextchar in self.quotes: - self.state = nextchar - elif self.posix and nextchar in self.escape: - escapedstate = 'a' - self.state = nextchar elif self.state == 'c': if nextchar in self.punctuation_chars: self.token += nextchar @@ -245,6 +240,11 @@ self._pushback_chars.append(nextchar) self.state = ' ' break + elif self.posix and nextchar in self.quotes: + self.state = nextchar + elif self.posix and nextchar in self.escape: + escapedstate = 'a' + self.state = nextchar elif (nextchar in self.wordchars or nextchar in self.quotes or self.whitespace_split): self.token += nextchar diff --git a/lib-python/3/shutil.py b/lib-python/3/shutil.py --- a/lib-python/3/shutil.py +++ b/lib-python/3/shutil.py @@ -10,7 +10,13 @@ import fnmatch import collections import errno -import tarfile + +try: + import zlib + del zlib + _ZLIB_SUPPORTED = True +except ImportError: + _ZLIB_SUPPORTED = 
False try: import bz2 @@ -602,23 +608,22 @@ Returns the output filename. """ - tar_compression = {'gzip': 'gz', None: ''} - compress_ext = {'gzip': '.gz'} - - if _BZ2_SUPPORTED: - tar_compression['bzip2'] = 'bz2' - compress_ext['bzip2'] = '.bz2' - - if _LZMA_SUPPORTED: - tar_compression['xz'] = 'xz' - compress_ext['xz'] = '.xz' - - # flags for compression program, each element of list will be an argument - if compress is not None and compress not in compress_ext: + if compress is None: + tar_compression = '' + elif _ZLIB_SUPPORTED and compress == 'gzip': + tar_compression = 'gz' + elif _BZ2_SUPPORTED and compress == 'bzip2': + tar_compression = 'bz2' + elif _LZMA_SUPPORTED and compress == 'xz': + tar_compression = 'xz' + else: raise ValueError("bad value for 'compress', or compression format not " "supported : {0}".format(compress)) - archive_name = base_name + '.tar' + compress_ext.get(compress, '') + import tarfile # late import for breaking circular dependency + + compress_ext = '.' + tar_compression if compress else '' + archive_name = base_name + '.tar' + compress_ext archive_dir = os.path.dirname(archive_name) if archive_dir and not os.path.exists(archive_dir): @@ -644,7 +649,7 @@ return tarinfo if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) From pypy.commits at gmail.com Thu Jul 13 10:50:02 2017 From: pypy.commits at gmail.com (rlamy) Date: Thu, 13 Jul 2017 07:50:02 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Update the stdlib (merge branch vendor/stdlib-3.6) Message-ID: <5967889a.150e1c0a.9d105.0989@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r91866:7b7912136af7 Date: 2017-07-13 16:48 +0200 http://bitbucket.org/pypy/pypy/changeset/7b7912136af7/ Log: Update the stdlib (merge branch vendor/stdlib-3.6) diff too long, truncating to 2000 out of 7633 lines diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -277,7 +277,7 @@ try: 
UnsupportedOperation = io.UnsupportedOperation except AttributeError: - class UnsupportedOperation(ValueError, OSError): + class UnsupportedOperation(OSError, ValueError): pass diff --git a/lib-python/3/aifc.py b/lib-python/3/aifc.py --- a/lib-python/3/aifc.py +++ b/lib-python/3/aifc.py @@ -303,6 +303,8 @@ # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk # _framesize -- size of one frame in the file + _file = None # Set here since __del__ checks it + def initfp(self, file): self._version = 0 self._convert = None @@ -344,9 +346,15 @@ def __init__(self, f): if isinstance(f, str): - f = builtins.open(f, 'rb') - # else, assume it is an open file object already - self.initfp(f) + file_object = builtins.open(f, 'rb') + try: + self.initfp(file_object) + except: + file_object.close() + raise + else: + # assume it is an open file object already + self.initfp(f) def __enter__(self): return self @@ -541,18 +549,23 @@ # _datalength -- the size of the audio samples written to the header # _datawritten -- the size of the audio samples actually written + _file = None # Set here since __del__ checks it + def __init__(self, f): if isinstance(f, str): - filename = f - f = builtins.open(f, 'wb') + file_object = builtins.open(f, 'wb') + try: + self.initfp(file_object) + except: + file_object.close() + raise + + # treat .aiff file extensions as non-compressed audio + if f.endswith('.aiff'): + self._aifc = 0 else: - # else, assume it is an open file object already - filename = '???' 
- self.initfp(f) - if filename[-5:] == '.aiff': - self._aifc = 0 - else: - self._aifc = 1 + # assume it is an open file object already + self.initfp(f) def initfp(self, file): self._file = file diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -182,7 +182,7 @@ self._root_section = self._Section(self, None) self._current_section = self._root_section - self._whitespace_matcher = _re.compile(r'\s+') + self._whitespace_matcher = _re.compile(r'\s+', _re.ASCII) self._long_break_matcher = _re.compile(r'\n\n\n+') # =============================== diff --git a/lib-python/3/asyncio/events.py b/lib-python/3/asyncio/events.py --- a/lib-python/3/asyncio/events.py +++ b/lib-python/3/asyncio/events.py @@ -11,6 +11,7 @@ import functools import inspect +import os import reprlib import socket import subprocess @@ -611,6 +612,9 @@ # A TLS for the running event loop, used by _get_running_loop. class _RunningLoop(threading.local): _loop = None + _pid = None + + _running_loop = _RunningLoop() @@ -620,7 +624,9 @@ This is a low-level function intended to be used by event loops. This function is thread-specific. """ - return _running_loop._loop + running_loop = _running_loop._loop + if running_loop is not None and _running_loop._pid == os.getpid(): + return running_loop def _set_running_loop(loop): @@ -629,6 +635,7 @@ This is a low-level function intended to be used by event loops. This function is thread-specific. 
""" + _running_loop._pid = os.getpid() _running_loop._loop = loop diff --git a/lib-python/3/asyncio/subprocess.py b/lib-python/3/asyncio/subprocess.py --- a/lib-python/3/asyncio/subprocess.py +++ b/lib-python/3/asyncio/subprocess.py @@ -24,6 +24,8 @@ self._limit = limit self.stdin = self.stdout = self.stderr = None self._transport = None + self._process_exited = False + self._pipe_fds = [] def __repr__(self): info = [self.__class__.__name__] @@ -43,12 +45,14 @@ self.stdout = streams.StreamReader(limit=self._limit, loop=self._loop) self.stdout.set_transport(stdout_transport) + self._pipe_fds.append(1) stderr_transport = transport.get_pipe_transport(2) if stderr_transport is not None: self.stderr = streams.StreamReader(limit=self._limit, loop=self._loop) self.stderr.set_transport(stderr_transport) + self._pipe_fds.append(2) stdin_transport = transport.get_pipe_transport(0) if stdin_transport is not None: @@ -86,9 +90,18 @@ else: reader.set_exception(exc) + if fd in self._pipe_fds: + self._pipe_fds.remove(fd) + self._maybe_close_transport() + def process_exited(self): - self._transport.close() - self._transport = None + self._process_exited = True + self._maybe_close_transport() + + def _maybe_close_transport(self): + if len(self._pipe_fds) == 0 and self._process_exited: + self._transport.close() + self._transport = None class Process: diff --git a/lib-python/3/asyncio/tasks.py b/lib-python/3/asyncio/tasks.py --- a/lib-python/3/asyncio/tasks.py +++ b/lib-python/3/asyncio/tasks.py @@ -487,7 +487,8 @@ """ warnings.warn("asyncio.async() function is deprecated, use ensure_future()", - DeprecationWarning) + DeprecationWarning, + stacklevel=2) return ensure_future(coro_or_future, loop=loop) diff --git a/lib-python/3/asyncio/test_utils.py b/lib-python/3/asyncio/test_utils.py --- a/lib-python/3/asyncio/test_utils.py +++ b/lib-python/3/asyncio/test_utils.py @@ -449,12 +449,15 @@ self.set_event_loop(loop) return loop + def unpatch_get_running_loop(self): + 
events._get_running_loop = self._get_running_loop + def setUp(self): self._get_running_loop = events._get_running_loop events._get_running_loop = lambda: None def tearDown(self): - events._get_running_loop = self._get_running_loop + self.unpatch_get_running_loop() events.set_event_loop(None) diff --git a/lib-python/3/base64.py b/lib-python/3/base64.py --- a/lib-python/3/base64.py +++ b/lib-python/3/base64.py @@ -541,7 +541,8 @@ def encodestring(s): """Legacy alias of encodebytes().""" import warnings - warnings.warn("encodestring() is a deprecated alias, use encodebytes()", + warnings.warn("encodestring() is a deprecated alias since 3.1, " + "use encodebytes()", DeprecationWarning, 2) return encodebytes(s) @@ -554,7 +555,8 @@ def decodestring(s): """Legacy alias of decodebytes().""" import warnings - warnings.warn("decodestring() is a deprecated alias, use decodebytes()", + warnings.warn("decodestring() is a deprecated alias since Python 3.1, " + "use decodebytes()", DeprecationWarning, 2) return decodebytes(s) diff --git a/lib-python/3/collections/__init__.py b/lib-python/3/collections/__init__.py --- a/lib-python/3/collections/__init__.py +++ b/lib-python/3/collections/__init__.py @@ -189,6 +189,7 @@ link = self.__map[key] link_prev = link.prev link_next = link.next + soft_link = link_next.prev link_prev.next = link_next link_next.prev = link_prev root = self.__root @@ -196,12 +197,14 @@ last = root.prev link.prev = last link.next = root - last.next = root.prev = link + root.prev = soft_link + last.next = link else: first = root.next link.prev = root link.next = first - root.next = first.prev = link + first.prev = soft_link + root.next = link def __sizeof__(self): sizeof = _sys.getsizeof diff --git a/lib-python/3/configparser.py b/lib-python/3/configparser.py --- a/lib-python/3/configparser.py +++ b/lib-python/3/configparser.py @@ -143,6 +143,7 @@ import functools import io import itertools +import os import re import sys import warnings @@ -687,7 +688,7 @@ 
Return list of successfully read files. """ - if isinstance(filenames, str): + if isinstance(filenames, (str, os.PathLike)): filenames = [filenames] read_ok = [] for filename in filenames: @@ -696,6 +697,8 @@ self._read(fp, filename) except OSError: continue + if isinstance(filename, os.PathLike): + filename = os.fspath(filename) read_ok.append(filename) return read_ok diff --git a/lib-python/3/contextlib.py b/lib-python/3/contextlib.py --- a/lib-python/3/contextlib.py +++ b/lib-python/3/contextlib.py @@ -105,7 +105,7 @@ # raised inside the "with" statement from being suppressed. return exc is not value except RuntimeError as exc: - # Don't re-raise the passed in exception. (issue27112) + # Don't re-raise the passed in exception. (issue27122) if exc is value: return False # Likewise, avoid suppressing if a StopIteration exception diff --git a/lib-python/3/ctypes/__init__.py b/lib-python/3/ctypes/__init__.py --- a/lib-python/3/ctypes/__init__.py +++ b/lib-python/3/ctypes/__init__.py @@ -325,6 +325,10 @@ """ _func_flags_ = _FUNCFLAG_CDECL _func_restype_ = c_int + # default values for repr + _name = '' + _handle = 0 + _FuncPtr = None def __init__(self, name, mode=DEFAULT_MODE, handle=None, use_errno=False, diff --git a/lib-python/3/ctypes/test/test_callbacks.py b/lib-python/3/ctypes/test/test_callbacks.py --- a/lib-python/3/ctypes/test/test_callbacks.py +++ b/lib-python/3/ctypes/test/test_callbacks.py @@ -246,6 +246,7 @@ def test_callback_large_struct(self): class Check: pass + # This should mirror the structure in Modules/_ctypes/_ctypes_test.c class X(Structure): _fields_ = [ ('first', c_ulong), @@ -257,6 +258,11 @@ check.first = s.first check.second = s.second check.third = s.third + # See issue #29565. 
+ # The structure should be passed by value, so + # any changes to it should not be reflected in + # the value passed + s.first = s.second = s.third = 0x0badf00d check = Check() s = X() @@ -277,6 +283,11 @@ self.assertEqual(check.first, 0xdeadbeef) self.assertEqual(check.second, 0xcafebabe) self.assertEqual(check.third, 0x0bad1dea) + # See issue #29565. + # Ensure that the original struct is unchanged. + self.assertEqual(s.first, check.first) + self.assertEqual(s.second, check.second) + self.assertEqual(s.third, check.third) ################################################################ diff --git a/lib-python/3/ctypes/test/test_structures.py b/lib-python/3/ctypes/test/test_structures.py --- a/lib-python/3/ctypes/test/test_structures.py +++ b/lib-python/3/ctypes/test/test_structures.py @@ -3,6 +3,7 @@ from ctypes.test import need_symbol from struct import calcsize import _testcapi +import _ctypes_test class SubclassesTest(unittest.TestCase): def test_subclass(self): @@ -391,6 +392,28 @@ (1, 0, 0, 0, 0, 0)) self.assertRaises(TypeError, lambda: Z(1, 2, 3, 4, 5, 6, 7)) + def test_pass_by_value(self): + # This should mirror the structure in Modules/_ctypes/_ctypes_test.c + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + dll = CDLL(_ctypes_test.__file__) + func = dll._testfunc_large_struct_update_value + func.argtypes = (X,) + func.restype = None + func(s) + self.assertEqual(s.first, 0xdeadbeef) + self.assertEqual(s.second, 0xcafebabe) + self.assertEqual(s.third, 0x0bad1dea) + class PointerMemberTestCase(unittest.TestCase): def test(self): diff --git a/lib-python/3/curses/ascii.py b/lib-python/3/curses/ascii.py --- a/lib-python/3/curses/ascii.py +++ b/lib-python/3/curses/ascii.py @@ -53,19 +53,19 @@ def isalnum(c): return isalpha(c) or isdigit(c) def isalpha(c): return isupper(c) or islower(c) -def isascii(c): return _ctoi(c) <= 
127 # ? +def isascii(c): return 0 <= _ctoi(c) <= 127 # ? def isblank(c): return _ctoi(c) in (9, 32) -def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127 -def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57 -def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126 -def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122 -def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126 +def iscntrl(c): return 0 <= _ctoi(c) <= 31 or _ctoi(c) == 127 +def isdigit(c): return 48 <= _ctoi(c) <= 57 +def isgraph(c): return 33 <= _ctoi(c) <= 126 +def islower(c): return 97 <= _ctoi(c) <= 122 +def isprint(c): return 32 <= _ctoi(c) <= 126 def ispunct(c): return isgraph(c) and not isalnum(c) def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32) -def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90 +def isupper(c): return 65 <= _ctoi(c) <= 90 def isxdigit(c): return isdigit(c) or \ - (_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102) -def isctrl(c): return _ctoi(c) < 32 + (65 <= _ctoi(c) <= 70) or (97 <= _ctoi(c) <= 102) +def isctrl(c): return 0 <= _ctoi(c) < 32 def ismeta(c): return _ctoi(c) > 127 def ascii(c): diff --git a/lib-python/3/curses/textpad.py b/lib-python/3/curses/textpad.py --- a/lib-python/3/curses/textpad.py +++ b/lib-python/3/curses/textpad.py @@ -43,16 +43,20 @@ def __init__(self, win, insert_mode=False): self.win = win self.insert_mode = insert_mode - (self.maxy, self.maxx) = win.getmaxyx() - self.maxy = self.maxy - 1 - self.maxx = self.maxx - 1 + self._update_max_yx() self.stripspaces = 1 self.lastcmd = None win.keypad(1) + def _update_max_yx(self): + maxy, maxx = self.win.getmaxyx() + self.maxy = maxy - 1 + self.maxx = maxx - 1 + def _end_of_line(self, y): """Go to the location of the first blank on the given line, returning the index of the last non-blank character.""" + self._update_max_yx() last = self.maxx while True: if curses.ascii.ascii(self.win.inch(y, last)) != curses.ascii.SP: @@ -64,8 +68,10 @@ return last def 
_insert_printable_char(self, ch): + self._update_max_yx() (y, x) = self.win.getyx() - if y < self.maxy or x < self.maxx: + backyx = None + while y < self.maxy or x < self.maxx: if self.insert_mode: oldch = self.win.inch() # The try-catch ignores the error we trigger from some curses @@ -75,14 +81,20 @@ self.win.addch(ch) except curses.error: pass - if self.insert_mode: - (backy, backx) = self.win.getyx() - if curses.ascii.isprint(oldch): - self._insert_printable_char(oldch) - self.win.move(backy, backx) + if not self.insert_mode or not curses.ascii.isprint(oldch): + break + ch = oldch + (y, x) = self.win.getyx() + # Remember where to put the cursor back since we are in insert_mode + if backyx is None: + backyx = y, x + + if backyx is not None: + self.win.move(*backyx) def do_command(self, ch): "Process a single editing command." + self._update_max_yx() (y, x) = self.win.getyx() self.lastcmd = ch if curses.ascii.isprint(ch): @@ -148,6 +160,7 @@ def gather(self): "Collect and return the contents of the window." result = "" + self._update_max_yx() for y in range(self.maxy+1): self.win.move(y, 0) stop = self._end_of_line(y) diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -1053,7 +1053,7 @@ hour, minute (required) second, microsecond (default to zero) tzinfo (default to None) - fold (keyword only, default to True) + fold (keyword only, default to zero) """ if isinstance(hour, bytes) and len(hour) == 6 and hour[0]&0x7F < 24: # Pickle support diff --git a/lib-python/3/dbm/dumb.py b/lib-python/3/dbm/dumb.py --- a/lib-python/3/dbm/dumb.py +++ b/lib-python/3/dbm/dumb.py @@ -97,8 +97,9 @@ try: f = _io.open(self._dirfile, 'r', encoding="Latin-1") except OSError: - pass + self._modified = not self._readonly else: + self._modified = False with f: for line in f: line = line.rstrip() @@ -113,7 +114,7 @@ # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). 
Therefore we must never reference a # global in this routine. - if self._index is None: + if self._index is None or not self._modified: return # nothing to do try: @@ -197,6 +198,7 @@ elif not isinstance(val, (bytes, bytearray)): raise TypeError("values must be bytes or strings") self._verify_open() + self._modified = True if key not in self._index: self._addkey(key, self._addval(val)) else: @@ -229,6 +231,7 @@ if isinstance(key, str): key = key.encode('utf-8') self._verify_open() + self._modified = True # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always diff --git a/lib-python/3/distutils/command/wininst-14.0-amd64.exe b/lib-python/3/distutils/command/wininst-14.0-amd64.exe index 22299543a97ffc1525a3b1c778cb158d6c6430ad..253c2e2eccefa79393827f44f85680536906574a GIT binary patch [cut] diff --git a/lib-python/3/distutils/command/wininst-14.0.exe b/lib-python/3/distutils/command/wininst-14.0.exe index 0dac1103d98db0af1e9027c41fe921136c5f6396..46f5f356676c800f99742deb6bf4c0a96aa166c0 GIT binary patch [cut] diff --git a/lib-python/3/distutils/tests/test_bdist_rpm.py b/lib-python/3/distutils/tests/test_bdist_rpm.py --- a/lib-python/3/distutils/tests/test_bdist_rpm.py +++ b/lib-python/3/distutils/tests/test_bdist_rpm.py @@ -94,7 +94,7 @@ @unittest.skipIf(find_executable('rpmbuild') is None, 'the rpmbuild command is not found') def test_no_optimize_flag(self): - # let's create a package that brakes bdist_rpm + # let's create a package that breaks bdist_rpm tmp_dir = self.mkdtemp() os.environ['HOME'] = tmp_dir # to confine dir '.rpmdb' creation pkg_dir = os.path.join(tmp_dir, 'foo') diff --git a/lib-python/3/enum.py b/lib-python/3/enum.py --- a/lib-python/3/enum.py +++ b/lib-python/3/enum.py @@ -1,7 +1,7 @@ import sys from types import MappingProxyType, DynamicClassAttribute from functools import reduce -from operator import or_ as _or_, and_ as _and_, xor, neg +from operator import or_ as 
_or_ # try _collections first to reduce startup cost try: @@ -690,7 +690,9 @@ pseudo_member = object.__new__(cls) pseudo_member._name_ = None pseudo_member._value_ = value - cls._value2member_map_[value] = pseudo_member + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) return pseudo_member def __contains__(self, other): @@ -785,7 +787,9 @@ pseudo_member = int.__new__(cls, value) pseudo_member._name_ = None pseudo_member._value_ = value - cls._value2member_map_[value] = pseudo_member + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) return pseudo_member def __or__(self, other): @@ -835,18 +839,21 @@ # _decompose is only called if the value is not named not_covered = value negative = value < 0 + # issue29167: wrap accesses to _value2member_map_ in a list to avoid race + # conditions between iterating over it and having more psuedo- + # members added to it if negative: # only check for named flags flags_to_check = [ (m, v) - for v, m in flag._value2member_map_.items() + for v, m in list(flag._value2member_map_.items()) if m.name is not None ] else: # check for named flags and powers-of-two flags flags_to_check = [ (m, v) - for v, m in flag._value2member_map_.items() + for v, m in list(flag._value2member_map_.items()) if m.name is not None or _power_of_two(v) ] members = [] diff --git a/lib-python/3/functools.py b/lib-python/3/functools.py --- a/lib-python/3/functools.py +++ b/lib-python/3/functools.py @@ -421,7 +421,7 @@ def _make_key(args, kwds, typed, kwd_mark = (object(),), fasttypes = {int, str, frozenset, type(None)}, - sorted=sorted, tuple=tuple, type=type, len=len): + tuple=tuple, type=type, len=len): """Make a cache key from optionally typed positional and keyword arguments The key is constructed in a way that is flat as possible rather 
than @@ -434,14 +434,13 @@ """ key = args if kwds: - sorted_items = sorted(kwds.items()) key += kwd_mark - for item in sorted_items: + for item in kwds.items(): key += item if typed: key += tuple(type(v) for v in args) if kwds: - key += tuple(type(v) for k, v in sorted_items) + key += tuple(type(v) for v in kwds.values()) elif len(key) == 1 and type(key[0]) in fasttypes: return key[0] return _HashedSeq(key) @@ -493,6 +492,7 @@ hits = misses = 0 full = False cache_get = cache.get # bound method to lookup a key or return None + cache_len = cache.__len__ # get cache size without calling len() lock = RLock() # because linkedlist updates aren't threadsafe root = [] # root of the circular doubly linked list root[:] = [root, root, None, None] # initialize by pointing to self @@ -574,14 +574,16 @@ last = root[PREV] link = [last, root, key, result] last[NEXT] = root[PREV] = cache[key] = link - full = (len(cache) >= maxsize) + # Use the cache_len bound method instead of the len() function + # which could potentially be wrapped in an lru_cache itself. + full = (cache_len() >= maxsize) misses += 1 return result def cache_info(): """Report cache statistics""" with lock: - return _CacheInfo(hits, misses, maxsize, len(cache)) + return _CacheInfo(hits, misses, maxsize, cache_len()) def cache_clear(): """Clear the cache and cache statistics""" diff --git a/lib-python/3/getpass.py b/lib-python/3/getpass.py --- a/lib-python/3/getpass.py +++ b/lib-python/3/getpass.py @@ -7,7 +7,6 @@ echoing of the password contents while reading. On Windows, the msvcrt module will be used. -On the Mac EasyDialogs.AskPassword is used, if available. 
""" diff --git a/lib-python/3/idlelib/colorizer.py b/lib-python/3/idlelib/colorizer.py --- a/lib-python/3/idlelib/colorizer.py +++ b/lib-python/3/idlelib/colorizer.py @@ -21,7 +21,7 @@ # 1st 'file' colorized normal, 2nd as builtin, 3rd as string builtin = r"([^.'\"\\#]\b|^)" + any("BUILTIN", builtinlist) + r"\b" comment = any("COMMENT", [r"#[^\n]*"]) - stringprefix = r"(\br|u|ur|R|U|UR|Ur|uR|b|B|br|Br|bR|BR|rb|rB|Rb|RB)?" + stringprefix = r"(?i:\br|u|f|fr|rf|b|br|rb)?" sqstring = stringprefix + r"'[^'\\\n]*(\\.[^'\\\n]*)*'?" dqstring = stringprefix + r'"[^"\\\n]*(\\.[^"\\\n]*)*"?' sq3string = stringprefix + r"'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?" @@ -261,8 +261,15 @@ top = Toplevel(parent) top.title("Test ColorDelegator") x, y = map(int, parent.geometry().split('+')[1:]) - top.geometry("200x100+%d+%d" % (x + 250, y + 175)) - source = "if somename: x = 'abc' # comment\nprint\n" + top.geometry("700x250+%d+%d" % (x + 20, y + 175)) + source = ("# Following has syntax errors\n" + "if True: then int 1\nelif False: print 0\nelse: float(None)\n" + "if iF + If + IF: 'keywork matching must respect case'\n" + "# All valid prefixes for unicode and byte strings should be colored\n" + "'x', '''x''', \"x\", \"\"\"x\"\"\"\n" + "r'x', u'x', R'x', U'x', f'x', F'x', ur'is invalid'\n" + "fr'x', Fr'x', fR'x', FR'x', rf'x', rF'x', Rf'x', RF'x'\n" + "b'x',B'x', br'x',Br'x',bR'x',BR'x', rb'x'.rB'x',Rb'x',RB'x'\n") text = Text(top, background="white") text.pack(expand=1, fill="both") text.insert("insert", source) diff --git a/lib-python/3/idlelib/help.html b/lib-python/3/idlelib/help.html --- a/lib-python/3/idlelib/help.html +++ b/lib-python/3/idlelib/help.html @@ -90,7 +90,7 @@

    25.5. IDLE

    -

    Source code: Lib/idlelib/

    +

    Source code: Lib/idlelib/


    IDLE is Python’s Integrated Development and Learning Environment.

    IDLE has the following features:

    diff --git a/lib-python/3/imaplib.py b/lib-python/3/imaplib.py --- a/lib-python/3/imaplib.py +++ b/lib-python/3/imaplib.py @@ -419,7 +419,7 @@ self.literal = _Authenticator(authobject).process typ, dat = self._simple_command('AUTHENTICATE', mech) if typ != 'OK': - raise self.error(dat[-1]) + raise self.error(dat[-1].decode('utf-8', 'replace')) self.state = 'AUTH' return typ, dat diff --git a/lib-python/3/importlib/_bootstrap_external.py b/lib-python/3/importlib/_bootstrap_external.py --- a/lib-python/3/importlib/_bootstrap_external.py +++ b/lib-python/3/importlib/_bootstrap_external.py @@ -1447,6 +1447,4 @@ _setup(_bootstrap_module) supported_loaders = _get_supported_file_loaders() sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)]) - if _os.__name__ == 'nt': - sys.meta_path.append(WindowsRegistryFinder) sys.meta_path.append(PathFinder) diff --git a/lib-python/3/inspect.py b/lib-python/3/inspect.py --- a/lib-python/3/inspect.py +++ b/lib-python/3/inspect.py @@ -1422,7 +1422,6 @@ except OSError: lines = index = None else: - start = max(start, 1) start = max(0, min(start, len(lines) - context)) lines = lines[start:start+context] index = lineno - 1 - start diff --git a/lib-python/3/logging/__init__.py b/lib-python/3/logging/__init__.py --- a/lib-python/3/logging/__init__.py +++ b/lib-python/3/logging/__init__.py @@ -131,9 +131,14 @@ Otherwise, the string "Level %s" % level is returned. 
""" - # See Issues #22386 and #27937 for why it's this way - return (_levelToName.get(level) or _nameToLevel.get(level) or - "Level %s" % level) + # See Issues #22386, #27937 and #29220 for why it's this way + result = _levelToName.get(level) + if result is not None: + return result + result = _nameToLevel.get(level) + if result is not None: + return result + return "Level %s" % level def addLevelName(level, levelName): """ diff --git a/lib-python/3/mailbox.py b/lib-python/3/mailbox.py --- a/lib-python/3/mailbox.py +++ b/lib-python/3/mailbox.py @@ -313,11 +313,12 @@ # final position in order to prevent race conditions with changes # from other programs try: - if hasattr(os, 'link'): + try: os.link(tmp_file.name, dest) + except (AttributeError, PermissionError): + os.rename(tmp_file.name, dest) + else: os.remove(tmp_file.name) - else: - os.rename(tmp_file.name, dest) except OSError as e: os.remove(tmp_file.name) if e.errno == errno.EEXIST: @@ -1200,13 +1201,14 @@ for key in self.iterkeys(): if key - 1 != prev: changes.append((key, prev + 1)) - if hasattr(os, 'link'): + try: os.link(os.path.join(self._path, str(key)), os.path.join(self._path, str(prev + 1))) - os.unlink(os.path.join(self._path, str(key))) - else: + except (AttributeError, PermissionError): os.rename(os.path.join(self._path, str(key)), os.path.join(self._path, str(prev + 1))) + else: + os.unlink(os.path.join(self._path, str(key))) prev += 1 self._next_key = prev + 1 if len(changes) == 0: @@ -2076,13 +2078,14 @@ else: raise try: - if hasattr(os, 'link'): + try: os.link(pre_lock.name, f.name + '.lock') dotlock_done = True - os.unlink(pre_lock.name) - else: + except (AttributeError, PermissionError): os.rename(pre_lock.name, f.name + '.lock') dotlock_done = True + else: + os.unlink(pre_lock.name) except FileExistsError: os.remove(pre_lock.name) raise ExternalClashError('dot lock unavailable: %s' % diff --git a/lib-python/3/multiprocessing/context.py b/lib-python/3/multiprocessing/context.py --- 
a/lib-python/3/multiprocessing/context.py +++ b/lib-python/3/multiprocessing/context.py @@ -196,7 +196,7 @@ def get_start_method(self, allow_none=False): return self._name - def set_start_method(self, method=None): + def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property diff --git a/lib-python/3/multiprocessing/spawn.py b/lib-python/3/multiprocessing/spawn.py --- a/lib-python/3/multiprocessing/spawn.py +++ b/lib-python/3/multiprocessing/spawn.py @@ -217,7 +217,7 @@ process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: - set_start_method(data['start_method']) + set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) diff --git a/lib-python/3/pathlib.py b/lib-python/3/pathlib.py --- a/lib-python/3/pathlib.py +++ b/lib-python/3/pathlib.py @@ -192,7 +192,9 @@ s = self._ext_to_normal(_getfinalpathname(s)) except FileNotFoundError: previous_s = s - s = os.path.abspath(os.path.join(s, os.pardir)) + s = os.path.dirname(s) + if previous_s == s: + return path else: if previous_s is None: return s @@ -1233,7 +1235,7 @@ if not exist_ok or not self.is_dir(): raise except OSError as e: - if e.errno != ENOENT: + if e.errno != ENOENT or self.parent == self: raise self.parent.mkdir(parents=True) self._accessor.mkdir(self, mode) diff --git a/lib-python/3/platform.py b/lib-python/3/platform.py --- a/lib-python/3/platform.py +++ b/lib-python/3/platform.py @@ -110,7 +110,7 @@ """ -__version__ = '1.0.7' +__version__ = '1.0.8' import collections import sys, os, re, subprocess @@ -1198,7 +1198,9 @@ elif buildtime: builddate = builddate + ' ' + buildtime - if hasattr(sys, '_mercurial'): + if hasattr(sys, '_git'): + _, branch, revision = sys._git + elif hasattr(sys, '_mercurial'): _, branch, revision = sys._mercurial elif hasattr(sys, 'subversion'): # sys.subversion was added in Python 2.5 diff --git a/lib-python/3/pstats.py 
b/lib-python/3/pstats.py --- a/lib-python/3/pstats.py +++ b/lib-python/3/pstats.py @@ -48,11 +48,14 @@ printed. The sort_stats() method now processes some additional options (i.e., in - addition to the old -1, 0, 1, or 2). It takes an arbitrary number of - quoted strings to select the sort order. For example sort_stats('time', - 'name') sorts on the major key of 'internal function time', and on the - minor key of 'the name of the function'. Look at the two tables in - sort_stats() and get_sort_arg_defs(self) for more examples. + addition to the old -1, 0, 1, or 2 that are respectively interpreted as + 'stdname', 'calls', 'time', and 'cumulative'). It takes an arbitrary number + of quoted strings to select the sort order. + + For example sort_stats('time', 'name') sorts on the major key of 'internal + function time', and on the minor key of 'the name of the function'. Look at + the two tables in sort_stats() and get_sort_arg_defs(self) for more + examples. All methods return self, so you can string together commands like: Stats('foo', 'goo').strip_dirs().sort_stats('calls').\ diff --git a/lib-python/3/pydoc_data/topics.py b/lib-python/3/pydoc_data/topics.py --- a/lib-python/3/pydoc_data/topics.py +++ b/lib-python/3/pydoc_data/topics.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Fri Dec 16 16:33:16 2016 -topics = {'assert': '\n' - 'The "assert" statement\n' +# Autogenerated by Sphinx on Sat Mar 4 12:14:44 2017 +topics = {'assert': 'The "assert" statement\n' '**********************\n' '\n' 'Assert statements are a convenient way to insert debugging ' @@ -39,8 +38,7 @@ 'Assignments to "__debug__" are illegal. 
The value for the ' 'built-in\n' 'variable is determined when the interpreter starts.\n', - 'assignment': '\n' - 'Assignment statements\n' + 'assignment': 'Assignment statements\n' '*********************\n' '\n' 'Assignment statements are used to (re)bind names to values and ' @@ -405,8 +403,7 @@ 'See also: **PEP 526** - Variable and attribute annotation ' 'syntax\n' ' **PEP 484** - Type hints\n', - 'atom-identifiers': '\n' - 'Identifiers (Names)\n' + 'atom-identifiers': 'Identifiers (Names)\n' '*******************\n' '\n' 'An identifier occurring as an atom is a name. See ' @@ -446,8 +443,7 @@ 'happen. If the class name consists only of underscores, ' 'no\n' 'transformation is done.\n', - 'atom-literals': '\n' - 'Literals\n' + 'atom-literals': 'Literals\n' '********\n' '\n' 'Python supports string and bytes literals and various ' @@ -476,8 +472,7 @@ 'may obtain\n' 'the same object or a different object with the same ' 'value.\n', - 'attribute-access': '\n' - 'Customizing attribute access\n' + 'attribute-access': 'Customizing attribute access\n' '****************************\n' '\n' 'The following methods can be defined to customize the ' @@ -851,8 +846,7 @@ '* *__class__* assignment works only if both classes have ' 'the same\n' ' *__slots__*.\n', - 'attribute-references': '\n' - 'Attribute references\n' + 'attribute-references': 'Attribute references\n' '********************\n' '\n' 'An attribute reference is a primary followed by a ' @@ -875,8 +869,7 @@ 'determined by the object. 
Multiple evaluations of ' 'the same attribute\n' 'reference may yield different objects.\n', - 'augassign': '\n' - 'Augmented assignment statements\n' + 'augassign': 'Augmented assignment statements\n' '*******************************\n' '\n' 'Augmented assignment is the combination, in a single statement, ' @@ -940,8 +933,7 @@ 'about\n' 'class and instance attributes applies as for regular ' 'assignments.\n', - 'binary': '\n' - 'Binary arithmetic operations\n' + 'binary': 'Binary arithmetic operations\n' '****************************\n' '\n' 'The binary arithmetic operations have the conventional priority\n' @@ -1029,8 +1021,7 @@ 'The "-" (subtraction) operator yields the difference of its ' 'arguments.\n' 'The numeric arguments are first converted to a common type.\n', - 'bitwise': '\n' - 'Binary bitwise operations\n' + 'bitwise': 'Binary bitwise operations\n' '*************************\n' '\n' 'Each of the three bitwise operations has a different priority ' @@ -1050,8 +1041,7 @@ 'The "|" operator yields the bitwise (inclusive) OR of its ' 'arguments,\n' 'which must be integers.\n', - 'bltin-code-objects': '\n' - 'Code Objects\n' + 'bltin-code-objects': 'Code Objects\n' '************\n' '\n' 'Code objects are used by the implementation to ' @@ -1074,8 +1064,7 @@ '\n' 'See The standard type hierarchy for more ' 'information.\n', - 'bltin-ellipsis-object': '\n' - 'The Ellipsis Object\n' + 'bltin-ellipsis-object': 'The Ellipsis Object\n' '*******************\n' '\n' 'This object is commonly used by slicing (see ' @@ -1087,8 +1076,7 @@ '"Ellipsis" singleton.\n' '\n' 'It is written as "Ellipsis" or "...".\n', - 'bltin-null-object': '\n' - 'The Null Object\n' + 'bltin-null-object': 'The Null Object\n' '***************\n' '\n' "This object is returned by functions that don't " @@ -1100,8 +1088,7 @@ 'same singleton.\n' '\n' 'It is written as "None".\n', - 'bltin-type-objects': '\n' - 'Type Objects\n' + 'bltin-type-objects': 'Type Objects\n' '************\n' '\n' 'Type 
objects represent the various object types. An ' @@ -1113,8 +1100,7 @@ 'all standard built-in types.\n' '\n' 'Types are written like this: "".\n', - 'booleans': '\n' - 'Boolean operations\n' + 'booleans': 'Boolean operations\n' '******************\n' '\n' ' or_test ::= and_test | or_test "or" and_test\n' @@ -1163,8 +1149,7 @@ 'its\n' 'argument (for example, "not \'foo\'" produces "False" rather ' 'than "\'\'".)\n', - 'break': '\n' - 'The "break" statement\n' + 'break': 'The "break" statement\n' '*********************\n' '\n' ' break_stmt ::= "break"\n' @@ -1185,8 +1170,7 @@ 'clause, that "finally" clause is executed before really leaving ' 'the\n' 'loop.\n', - 'callable-types': '\n' - 'Emulating callable objects\n' + 'callable-types': 'Emulating callable objects\n' '**************************\n' '\n' 'object.__call__(self[, args...])\n' @@ -1195,8 +1179,7 @@ 'this method\n' ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n' ' "x.__call__(arg1, arg2, ...)".\n', - 'calls': '\n' - 'Calls\n' + 'calls': 'Calls\n' '*****\n' '\n' 'A call calls a callable object (e.g., a *function*) with a ' @@ -1217,7 +1200,8 @@ ' ("," "*" expression | "," ' 'keyword_item)*\n' ' keywords_arguments ::= (keyword_item | "**" expression)\n' - ' ("," keyword_item | "**" expression)*\n' + ' ("," keyword_item | "," "**" ' + 'expression)*\n' ' keyword_item ::= identifier "=" expression\n' '\n' 'An optional trailing comma may be present after the positional and\n' @@ -1382,8 +1366,7 @@ ' The class must define a "__call__()" method; the effect is then ' 'the\n' ' same as if that method was called.\n', - 'class': '\n' - 'Class definitions\n' + 'class': 'Class definitions\n' '*****************\n' '\n' 'A class definition defines a class object (see section The ' @@ -1469,8 +1452,7 @@ '\n' 'See also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n' ' Class Decorators\n', - 'comparisons': '\n' - 'Comparisons\n' + 'comparisons': 'Comparisons\n' '***********\n' '\n' 'Unlike C, all 
comparison operations in Python have the same ' @@ -1623,7 +1605,7 @@ 'restriction that\n' ' ranges do not support order comparison. Equality ' 'comparison across\n' - ' these types results in unequality, and ordering comparison ' + ' these types results in inequality, and ordering comparison ' 'across\n' ' these types raises "TypeError".\n' '\n' @@ -1762,6 +1744,12 @@ ' to sequences, but not to sets or mappings). See also the\n' ' "total_ordering()" decorator.\n' '\n' + '* The "hash()" result should be consistent with equality. ' + 'Objects\n' + ' that are equal should either have the same hash value, or ' + 'be marked\n' + ' as unhashable.\n' + '\n' 'Python does not enforce these consistency rules. In fact, ' 'the\n' 'not-a-number values are an example for not following these ' @@ -1833,8 +1821,7 @@ 'is determined using the "id()" function. "x is not y" yields ' 'the\n' 'inverse truth value. [4]\n', - 'compound': '\n' - 'Compound statements\n' + 'compound': 'Compound statements\n' '*******************\n' '\n' 'Compound statements contain (groups of) other statements; they ' @@ -2725,8 +2712,7 @@ ' body is transformed into the namespace\'s "__doc__" item ' 'and\n' " therefore the class's *docstring*.\n", - 'context-managers': '\n' - 'With Statement Context Managers\n' + 'context-managers': 'With Statement Context Managers\n' '*******************************\n' '\n' 'A *context manager* is an object that defines the ' @@ -2788,8 +2774,7 @@ ' The specification, background, and examples for the ' 'Python "with"\n' ' statement.\n', - 'continue': '\n' - 'The "continue" statement\n' + 'continue': 'The "continue" statement\n' '************************\n' '\n' ' continue_stmt ::= "continue"\n' @@ -2806,8 +2791,7 @@ '"finally" clause, that "finally" clause is executed before ' 'really\n' 'starting the next loop cycle.\n', - 'conversions': '\n' - 'Arithmetic conversions\n' + 'conversions': 'Arithmetic conversions\n' '**********************\n' '\n' 'When a description of an 
arithmetic operator below uses the ' @@ -2833,8 +2817,7 @@ "left argument to the '%' operator). Extensions must define " 'their own\n' 'conversion behavior.\n', - 'customization': '\n' - 'Basic customization\n' + 'customization': 'Basic customization\n' '*******************\n' '\n' 'object.__new__(cls[, ...])\n' @@ -3153,15 +3136,18 @@ 'on members\n' ' of hashed collections including "set", "frozenset", and ' '"dict".\n' - ' "__hash__()" should return an integer. The only ' - 'required property\n' + ' "__hash__()" should return an integer. The only required ' + 'property\n' ' is that objects which compare equal have the same hash ' 'value; it is\n' - ' advised to somehow mix together (e.g. using exclusive ' - 'or) the hash\n' - ' values for the components of the object that also play a ' - 'part in\n' - ' comparison of objects.\n' + ' advised to mix together the hash values of the ' + 'components of the\n' + ' object that also play a part in comparison of objects by ' + 'packing\n' + ' them into a tuple and hashing the tuple. 
Example:\n' + '\n' + ' def __hash__(self):\n' + ' return hash((self.name, self.nick, self.color))\n' '\n' ' Note: "hash()" truncates the value returned from an ' "object's\n" @@ -3273,8 +3259,7 @@ ' neither "__len__()" nor "__bool__()", all its instances ' 'are\n' ' considered true.\n', - 'debugger': '\n' - '"pdb" --- The Python Debugger\n' + 'debugger': '"pdb" --- The Python Debugger\n' '*****************************\n' '\n' '**Source code:** Lib/pdb.py\n' @@ -3939,8 +3924,7 @@ '[1] Whether a frame is considered to originate in a certain ' 'module\n' ' is determined by the "__name__" in the frame globals.\n', - 'del': '\n' - 'The "del" statement\n' + 'del': 'The "del" statement\n' '*******************\n' '\n' ' del_stmt ::= "del" target_list\n' @@ -3969,8 +3953,7 @@ 'Changed in version 3.2: Previously it was illegal to delete a name\n' 'from the local namespace if it occurs as a free variable in a nested\n' 'block.\n', - 'dict': '\n' - 'Dictionary displays\n' + 'dict': 'Dictionary displays\n' '*******************\n' '\n' 'A dictionary display is a possibly empty series of key/datum pairs\n' @@ -4014,8 +3997,7 @@ 'should be *hashable*, which excludes all mutable objects.) Clashes\n' 'between duplicate keys are not detected; the last datum (textually\n' 'rightmost in the display) stored for a given key value prevails.\n', - 'dynamic-features': '\n' - 'Interaction with dynamic features\n' + 'dynamic-features': 'Interaction with dynamic features\n' '*********************************\n' '\n' 'Name resolution of free variables occurs at runtime, not ' @@ -4051,8 +4033,7 @@ 'override the global and local namespace. 
If only one ' 'namespace is\n' 'specified, it is used for both.\n', - 'else': '\n' - 'The "if" statement\n' + 'else': 'The "if" statement\n' '******************\n' '\n' 'The "if" statement is used for conditional execution:\n' @@ -4069,8 +4050,7 @@ '(and no other part of the "if" statement is executed or evaluated).\n' 'If all expressions are false, the suite of the "else" clause, if\n' 'present, is executed.\n', - 'exceptions': '\n' - 'Exceptions\n' + 'exceptions': 'Exceptions\n' '**********\n' '\n' 'Exceptions are a means of breaking out of the normal flow of ' @@ -4146,8 +4126,7 @@ ' these operations is not available at the time the module ' 'is\n' ' compiled.\n', - 'execmodel': '\n' - 'Execution model\n' + 'execmodel': 'Execution model\n' '***************\n' '\n' '\n' @@ -4478,8 +4457,7 @@ ' these operations is not available at the time the module ' 'is\n' ' compiled.\n', - 'exprlists': '\n' - 'Expression lists\n' + 'exprlists': 'Expression lists\n' '****************\n' '\n' ' expression_list ::= expression ( "," expression )* [","]\n' @@ -4516,8 +4494,7 @@ 'value of that expression. 
(To create an empty tuple, use an ' 'empty pair\n' 'of parentheses: "()".)\n', - 'floating': '\n' - 'Floating point literals\n' + 'floating': 'Floating point literals\n' '***********************\n' '\n' 'Floating point literals are described by the following lexical\n' @@ -4553,8 +4530,7 @@ 'Changed in version 3.6: Underscores are now allowed for ' 'grouping\n' 'purposes in literals.\n', - 'for': '\n' - 'The "for" statement\n' + 'for': 'The "for" statement\n' '*******************\n' '\n' 'The "for" statement is used to iterate over the elements of a ' @@ -4626,8 +4602,7 @@ '\n' ' for x in a[:]:\n' ' if x < 0: a.remove(x)\n', - 'formatstrings': '\n' - 'Format String Syntax\n' + 'formatstrings': 'Format String Syntax\n' '********************\n' '\n' 'The "str.format()" method and the "Formatter" class share ' @@ -5346,8 +5321,7 @@ ' 9 9 11 1001\n' ' 10 A 12 1010\n' ' 11 B 13 1011\n', - 'function': '\n' - 'Function definitions\n' + 'function': 'Function definitions\n' '********************\n' '\n' 'A function definition defines a user-defined function object ' @@ -5516,8 +5490,7 @@ '\n' ' **PEP 3107** - Function Annotations\n' ' The original specification for function annotations.\n', - 'global': '\n' - 'The "global" statement\n' + 'global': 'The "global" statement\n' '**********************\n' '\n' ' global_stmt ::= "global" identifier ("," identifier)*\n' @@ -5561,8 +5534,7 @@ 'code containing the function call. The same applies to the ' '"eval()"\n' 'and "compile()" functions.\n', - 'id-classes': '\n' - 'Reserved classes of identifiers\n' + 'id-classes': 'Reserved classes of identifiers\n' '*******************************\n' '\n' 'Certain classes of identifiers (besides keywords) have ' @@ -5610,8 +5582,7 @@ ' to help avoid name clashes between "private" attributes of ' 'base and\n' ' derived classes. 
See section Identifiers (Names).\n', - 'identifiers': '\n' - 'Identifiers and keywords\n' + 'identifiers': 'Identifiers and keywords\n' '************************\n' '\n' 'Identifiers (also referred to as *names*) are described by ' @@ -5759,8 +5730,7 @@ ' to help avoid name clashes between "private" attributes of ' 'base and\n' ' derived classes. See section Identifiers (Names).\n', - 'if': '\n' - 'The "if" statement\n' + 'if': 'The "if" statement\n' '******************\n' '\n' 'The "if" statement is used for conditional execution:\n' @@ -5776,8 +5746,7 @@ '(and no other part of the "if" statement is executed or evaluated).\n' 'If all expressions are false, the suite of the "else" clause, if\n' 'present, is executed.\n', - 'imaginary': '\n' - 'Imaginary literals\n' + 'imaginary': 'Imaginary literals\n' '******************\n' '\n' 'Imaginary literals are described by the following lexical ' @@ -5797,8 +5766,7 @@ '\n' ' 3.14j 10.j 10j .001j 1e100j 3.14e-10j ' '3.14_15_93j\n', - 'import': '\n' - 'The "import" statement\n' + 'import': 'The "import" statement\n' '**********************\n' '\n' ' import_stmt ::= "import" module ["as" name] ( "," module ' @@ -6059,8 +6027,7 @@ '\n' ' **PEP 236** - Back to the __future__\n' ' The original proposal for the __future__ mechanism.\n', - 'in': '\n' - 'Membership test operations\n' + 'in': 'Membership test operations\n' '**************************\n' '\n' 'The operators "in" and "not in" test for membership. 
"x in s"\n' @@ -6095,8 +6062,7 @@ '\n' 'The operator "not in" is defined to have the inverse true value of\n' '"in".\n', - 'integers': '\n' - 'Integer literals\n' + 'integers': 'Integer literals\n' '****************\n' '\n' 'Integer literals are described by the following lexical ' @@ -6142,8 +6108,7 @@ 'Changed in version 3.6: Underscores are now allowed for ' 'grouping\n' 'purposes in literals.\n', - 'lambda': '\n' - 'Lambdas\n' + 'lambda': 'Lambdas\n' '*******\n' '\n' ' lambda_expr ::= "lambda" [parameter_list]: expression\n' @@ -6166,8 +6131,7 @@ 'Note that functions created with lambda expressions cannot ' 'contain\n' 'statements or annotations.\n', - 'lists': '\n' - 'List displays\n' + 'lists': 'List displays\n' '*************\n' '\n' 'A list display is a possibly empty series of expressions enclosed ' @@ -6184,8 +6148,7 @@ 'from left to right and placed into the list object in that order.\n' 'When a comprehension is supplied, the list is constructed from the\n' 'elements resulting from the comprehension.\n', - 'naming': '\n' - 'Naming and binding\n' + 'naming': 'Naming and binding\n' '******************\n' '\n' '\n' @@ -6398,8 +6361,7 @@ 'override the global and local namespace. 
If only one namespace ' 'is\n' 'specified, it is used for both.\n', - 'nonlocal': '\n' - 'The "nonlocal" statement\n' + 'nonlocal': 'The "nonlocal" statement\n' '************************\n' '\n' ' nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n' @@ -6430,8 +6392,7 @@ '\n' ' **PEP 3104** - Access to Names in Outer Scopes\n' ' The specification for the "nonlocal" statement.\n', - 'numbers': '\n' - 'Numeric literals\n' + 'numbers': 'Numeric literals\n' '****************\n' '\n' 'There are three types of numeric literals: integers, floating ' @@ -6445,8 +6406,7 @@ 'is actually an expression composed of the unary operator \'"-"\' ' 'and the\n' 'literal "1".\n', - 'numeric-types': '\n' - 'Emulating numeric types\n' + 'numeric-types': 'Emulating numeric types\n' '***********************\n' '\n' 'The following methods can be defined to emulate numeric ' @@ -6622,8 +6582,7 @@ ' "__index__()" is defined "__int__()" should also be ' 'defined, and\n' ' both should return the same value.\n', - 'objects': '\n' - 'Objects, values and types\n' + 'objects': 'Objects, values and types\n' '*************************\n' '\n' "*Objects* are Python's abstraction for data. All data in a " @@ -6751,8 +6710,7 @@ 'created empty lists. 
(Note that "c = d = []" assigns the same ' 'object\n' 'to both "c" and "d".)\n', - 'operator-summary': '\n' - 'Operator precedence\n' + 'operator-summary': 'Operator precedence\n' '*******************\n' '\n' 'The following table summarizes the operator precedence ' @@ -6925,8 +6883,7 @@ 'arithmetic\n' ' or bitwise unary operator on its right, that is, ' '"2**-1" is "0.5".\n', - 'pass': '\n' - 'The "pass" statement\n' + 'pass': 'The "pass" statement\n' '********************\n' '\n' ' pass_stmt ::= "pass"\n' @@ -6939,8 +6896,7 @@ ' def f(arg): pass # a function that does nothing (yet)\n' '\n' ' class C: pass # a class with no methods (yet)\n', - 'power': '\n' - 'The power operator\n' + 'power': 'The power operator\n' '******************\n' '\n' 'The power operator binds more tightly than unary operators on its\n' @@ -6974,8 +6930,7 @@ 'Raising a negative number to a fractional power results in a ' '"complex"\n' 'number. (In earlier versions it raised a "ValueError".)\n', - 'raise': '\n' - 'The "raise" statement\n' + 'raise': 'The "raise" statement\n' '*********************\n' '\n' ' raise_stmt ::= "raise" [expression ["from" expression]]\n' @@ -7060,8 +7015,7 @@ 'Exceptions, and information about handling exceptions is in ' 'section\n' 'The try statement.\n', - 'return': '\n' - 'The "return" statement\n' + 'return': 'The "return" statement\n' '**********************\n' '\n' ' return_stmt ::= "return" [expression_list]\n' @@ -7096,8 +7050,7 @@ '"StopAsyncIteration" to be raised. 
A non-empty "return" statement ' 'is\n' 'a syntax error in an asynchronous generator function.\n', - 'sequence-types': '\n' - 'Emulating container types\n' + 'sequence-types': 'Emulating container types\n' '*************************\n' '\n' 'The following methods can be defined to implement ' @@ -7318,8 +7271,7 @@ ' iteration protocol via "__getitem__()", see this ' 'section in the\n' ' language reference.\n', - 'shifting': '\n' - 'Shifting operations\n' + 'shifting': 'Shifting operations\n' '*******************\n' '\n' 'The shifting operations have lower priority than the arithmetic\n' @@ -7343,8 +7295,7 @@ 'operand is\n' ' larger than "sys.maxsize" an "OverflowError" exception is ' 'raised.\n', - 'slicings': '\n' - 'Slicings\n' + 'slicings': 'Slicings\n' '********\n' '\n' 'A slicing selects a range of items in a sequence object (e.g., ' @@ -7395,8 +7346,7 @@ 'as lower bound, upper bound and stride, respectively, ' 'substituting\n' '"None" for missing expressions.\n', - 'specialattrs': '\n' - 'Special Attributes\n' + 'specialattrs': 'Special Attributes\n' '******************\n' '\n' 'The implementation adds a few special read-only attributes ' @@ -7481,8 +7431,7 @@ '[5] To format only a tuple you should therefore provide a\n' ' singleton tuple whose only element is the tuple to be ' 'formatted.\n', - 'specialnames': '\n' - 'Special method names\n' + 'specialnames': 'Special method names\n' '********************\n' '\n' 'A class can implement certain operations that are invoked by ' @@ -7843,15 +7792,18 @@ 'on members\n' ' of hashed collections including "set", "frozenset", and ' '"dict".\n' - ' "__hash__()" should return an integer. The only required ' + ' "__hash__()" should return an integer. The only required ' 'property\n' ' is that objects which compare equal have the same hash ' 'value; it is\n' - ' advised to somehow mix together (e.g. 
using exclusive or) ' - 'the hash\n' - ' values for the components of the object that also play a ' - 'part in\n' - ' comparison of objects.\n' + ' advised to mix together the hash values of the components ' + 'of the\n' + ' object that also play a part in comparison of objects by ' + 'packing\n' + ' them into a tuple and hashing the tuple. Example:\n' + '\n' + ' def __hash__(self):\n' + ' return hash((self.name, self.nick, self.color))\n' '\n' ' Note: "hash()" truncates the value returned from an ' "object's\n" @@ -9270,8 +9222,7 @@ 'special method *must* be set on the class object itself in ' 'order to be\n' 'consistently invoked by the interpreter).\n', - 'string-methods': '\n' - 'String Methods\n' + 'string-methods': 'String Methods\n' '**************\n' '\n' 'Strings implement all of the common sequence operations, ' @@ -9508,12 +9459,11 @@ 'characters\n' ' and there is at least one character, false otherwise. ' 'Decimal\n' - ' characters are those from general category "Nd". This ' - 'category\n' - ' includes digit characters, and all characters that can ' - 'be used to\n' - ' form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC ' - 'DIGIT ZERO.\n' + ' characters are those that can be used to form numbers ' + 'in base 10,\n' + ' e.g. U+0660, ARABIC-INDIC DIGIT ZERO. Formally a ' + 'decimal character\n' + ' is a character in the Unicode General Category "Nd".\n' '\n' 'str.isdigit()\n' '\n' @@ -9523,10 +9473,13 @@ 'include decimal\n' ' characters and digits that need special handling, such ' 'as the\n' - ' compatibility superscript digits. Formally, a digit is ' - 'a character\n' - ' that has the property value Numeric_Type=Digit or\n' - ' Numeric_Type=Decimal.\n' + ' compatibility superscript digits. 
This covers digits ' + 'which cannot\n' + ' be used to form numbers in base 10, like the Kharosthi ' + 'numbers.\n' + ' Formally, a digit is a character that has the property ' + 'value\n' + ' Numeric_Type=Digit or Numeric_Type=Decimal.\n' '\n' 'str.isidentifier()\n' '\n' @@ -10072,8 +10025,7 @@ " '00042'\n" ' >>> "-42".zfill(5)\n' " '-0042'\n", - 'strings': '\n' - 'String and Bytes literals\n' + 'strings': 'String and Bytes literals\n' '*************************\n' '\n' 'String literals are described by the following lexical ' @@ -10307,8 +10259,7 @@ 'followed by a newline is interpreted as those two characters as ' 'part\n' 'of the literal, *not* as a line continuation.\n', - 'subscriptions': '\n' - 'Subscriptions\n' + 'subscriptions': 'Subscriptions\n' '*************\n' '\n' 'A subscription selects an item of a sequence (string, tuple ' @@ -10365,8 +10316,7 @@ "A string's items are characters. A character is not a " 'separate data\n' 'type but a string of exactly one character.\n', - 'truth': '\n' - 'Truth Value Testing\n' + 'truth': 'Truth Value Testing\n' '*******************\n' '\n' 'Any object can be tested for truth value, for use in an "if" or\n' @@ -10398,8 +10348,7 @@ 'otherwise stated. (Important exception: the Boolean operations ' '"or"\n' 'and "and" always return one of their operands.)\n', - 'try': '\n' - 'The "try" statement\n' + 'try': 'The "try" statement\n' '*******************\n' '\n' 'The "try" statement specifies exception handlers and/or cleanup code\n' @@ -10546,8 +10495,7 @@ 'Exceptions, and information on using the "raise" statement to ' 'generate\n' 'exceptions may be found in section The raise statement.\n', - 'types': '\n' - 'The standard type hierarchy\n' + 'types': 'The standard type hierarchy\n' '***************************\n' '\n' 'Below is a list of the types that are built into Python. 
' @@ -11262,14 +11210,14 @@ 'the\n' ' dictionary containing the class\'s namespace; "__bases__" is a ' 'tuple\n' - ' (possibly empty or a singleton) containing the base classes, in ' - 'the\n' - ' order of their occurrence in the base class list; "__doc__" is ' - 'the\n' - ' class\'s documentation string, or "None" if undefined;\n' - ' "__annotations__" (optional) is a dictionary containing ' - '*variable\n' - ' annotations* collected during class body execution.\n' + ' containing the base classes, in the order of their occurrence ' + 'in\n' + ' the base class list; "__doc__" is the class\'s documentation ' + 'string,\n' + ' or "None" if undefined; "__annotations__" (optional) is a\n' + ' dictionary containing *variable annotations* collected during ' + 'class\n' + ' body execution.\n' '\n' 'Class instances\n' ' A class instance is created by calling a class object (see ' @@ -11549,8 +11497,7 @@ ' under "User-defined methods". Class method objects are ' 'created\n' ' by the built-in "classmethod()" constructor.\n', - 'typesfunctions': '\n' - 'Functions\n' + 'typesfunctions': 'Functions\n' '*********\n' '\n' 'Function objects are created by function definitions. 
The ' @@ -11567,8 +11514,7 @@ 'different object types.\n' '\n' 'See Function definitions for more information.\n', - 'typesmapping': '\n' - 'Mapping Types --- "dict"\n' + 'typesmapping': 'Mapping Types --- "dict"\n' '************************\n' '\n' 'A *mapping* object maps *hashable* values to arbitrary ' @@ -11925,8 +11871,7 @@ " {'bacon'}\n" " >>> keys ^ {'sausage', 'juice'}\n" " {'juice', 'sausage', 'bacon', 'spam'}\n", - 'typesmethods': '\n' - 'Methods\n' + 'typesmethods': 'Methods\n' '*******\n' '\n' 'Methods are functions that are called using the attribute ' @@ -11983,8 +11928,7 @@ " 'my name is method'\n" '\n' 'See The standard type hierarchy for more information.\n', - 'typesmodules': '\n' - 'Modules\n' + 'typesmodules': 'Modules\n' '*******\n' '\n' 'The only special operation on a module is attribute access: ' @@ -12021,8 +11965,7 @@ 'written as\n' '"".\n', - 'typesseq': '\n' - 'Sequence Types --- "list", "tuple", "range"\n' + 'typesseq': 'Sequence Types --- "list", "tuple", "range"\n' '*******************************************\n' '\n' 'There are three basic sequence types: lists, tuples, and range\n' @@ -12170,9 +12113,9 @@ '\n' '3. If *i* or *j* is negative, the index is relative to the end ' 'of\n' - ' the string: "len(s) + i" or "len(s) + j" is substituted. But ' - 'note\n' - ' that "-0" is still "0".\n' + ' sequence *s*: "len(s) + i" or "len(s) + j" is substituted. ' + 'But\n' + ' note that "-0" is still "0".\n' '\n' '4. The slice of *s* from *i* to *j* is defined as the sequence ' 'of\n' @@ -12191,12 +12134,17 @@ ' (j-i)/k". In other words, the indices are "i", "i+k", ' '"i+2*k",\n' ' "i+3*k" and so on, stopping when *j* is reached (but never\n' - ' including *j*). If *i* or *j* is greater than "len(s)", use\n' - ' "len(s)". If *i* or *j* are omitted or "None", they become ' - '"end"\n' - ' values (which end depends on the sign of *k*). Note, *k* ' - 'cannot be\n' - ' zero. If *k* is "None", it is treated like "1".\n' + ' including *j*). 
When *k* is positive, *i* and *j* are ' + 'reduced to\n' + ' "len(s)" if they are greater. When *k* is negative, *i* and ' + '*j* are\n' + ' reduced to "len(s) - 1" if they are greater. If *i* or *j* ' + 'are\n' + ' omitted or "None", they become "end" values (which end ' + 'depends on\n' + ' the sign of *k*). Note, *k* cannot be zero. If *k* is ' + '"None", it\n' + ' is treated like "1".\n' '\n' '6. Concatenating immutable sequences always results in a new\n' ' object. This means that building up a sequence by repeated\n' @@ -12714,8 +12662,7 @@ ' * The linspace recipe shows how to implement a lazy version ' 'of\n' ' range that suitable for floating point applications.\n', - 'typesseq-mutable': '\n' - 'Mutable Sequence Types\n' + 'typesseq-mutable': 'Mutable Sequence Types\n' '**********************\n' '\n' 'The operations in the following table are defined on ' @@ -12855,8 +12802,7 @@ 'referenced multiple\n' ' times, as explained for "s * n" under Common Sequence ' 'Operations.\n', - 'unary': '\n' - 'Unary arithmetic and bitwise operations\n' + 'unary': 'Unary arithmetic and bitwise operations\n' '***************************************\n' '\n' 'All unary arithmetic and bitwise operations have the same ' @@ -12878,8 +12824,7 @@ 'In all three cases, if the argument does not have the proper type, ' 'a\n' '"TypeError" exception is raised.\n', - 'while': '\n' - 'The "while" statement\n' + 'while': 'The "while" statement\n' '*********************\n' '\n' 'The "while" statement is used for repeated execution as long as an\n' @@ -12903,8 +12848,7 @@ 'executed in the first suite skips the rest of the suite and goes ' 'back\n' 'to testing the expression.\n', - 'with': '\n' - 'The "with" statement\n' + 'with': 'The "with" statement\n' '********************\n' '\n' 'The "with" statement is used to wrap the execution of a block with\n' @@ -12977,8 +12921,7 @@ ' The specification, background, and examples for the Python ' '"with"\n' ' statement.\n', - 'yield': '\n' - 'The 
"yield" statement\n' + 'yield': 'The "yield" statement\n' '*********************\n' '\n' ' yield_stmt ::= yield_expression\n' diff --git a/lib-python/3/random.py b/lib-python/3/random.py --- a/lib-python/3/random.py +++ b/lib-python/3/random.py @@ -254,7 +254,7 @@ try: i = self._randbelow(len(seq)) except ValueError: - raise IndexError('Cannot choose from an empty sequence') + raise IndexError('Cannot choose from an empty sequence') from None return seq[i] def shuffle(self, x, random=None): diff --git a/lib-python/3/secrets.py b/lib-python/3/secrets.py --- a/lib-python/3/secrets.py +++ b/lib-python/3/secrets.py @@ -26,6 +26,8 @@ def randbelow(exclusive_upper_bound): """Return a random int in the range [0, n).""" + if exclusive_upper_bound <= 0: + raise ValueError("Upper bound must be positive.") return _sysrand._randbelow(exclusive_upper_bound) DEFAULT_ENTROPY = 32 # number of bytes to return by default diff --git a/lib-python/3/shlex.py b/lib-python/3/shlex.py --- a/lib-python/3/shlex.py +++ b/lib-python/3/shlex.py @@ -232,11 +232,6 @@ break # emit current token else: continue - elif self.posix and nextchar in self.quotes: - self.state = nextchar - elif self.posix and nextchar in self.escape: - escapedstate = 'a' - self.state = nextchar elif self.state == 'c': if nextchar in self.punctuation_chars: self.token += nextchar @@ -245,6 +240,11 @@ self._pushback_chars.append(nextchar) self.state = ' ' break + elif self.posix and nextchar in self.quotes: + self.state = nextchar + elif self.posix and nextchar in self.escape: + escapedstate = 'a' + self.state = nextchar elif (nextchar in self.wordchars or nextchar in self.quotes or self.whitespace_split): self.token += nextchar diff --git a/lib-python/3/shutil.py b/lib-python/3/shutil.py --- a/lib-python/3/shutil.py +++ b/lib-python/3/shutil.py @@ -10,7 +10,13 @@ import fnmatch import collections import errno -import tarfile + +try: + import zlib + del zlib + _ZLIB_SUPPORTED = True +except ImportError: + _ZLIB_SUPPORTED = 
False try: import bz2 @@ -602,23 +608,22 @@ Returns the output filename. """ - tar_compression = {'gzip': 'gz', None: ''} - compress_ext = {'gzip': '.gz'} - - if _BZ2_SUPPORTED: - tar_compression['bzip2'] = 'bz2' - compress_ext['bzip2'] = '.bz2' - - if _LZMA_SUPPORTED: - tar_compression['xz'] = 'xz' - compress_ext['xz'] = '.xz' - - # flags for compression program, each element of list will be an argument - if compress is not None and compress not in compress_ext: + if compress is None: + tar_compression = '' + elif _ZLIB_SUPPORTED and compress == 'gzip': + tar_compression = 'gz' + elif _BZ2_SUPPORTED and compress == 'bzip2': + tar_compression = 'bz2' + elif _LZMA_SUPPORTED and compress == 'xz': + tar_compression = 'xz' + else: raise ValueError("bad value for 'compress', or compression format not " "supported : {0}".format(compress)) - archive_name = base_name + '.tar' + compress_ext.get(compress, '') + import tarfile # late import for breaking circular dependency + + compress_ext = '.' + tar_compression if compress else '' + archive_name = base_name + '.tar' + compress_ext archive_dir = os.path.dirname(archive_name) if archive_dir and not os.path.exists(archive_dir): @@ -644,7 +649,7 @@ return tarinfo if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) From pypy.commits at gmail.com Thu Jul 13 10:55:56 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 13 Jul 2017 07:55:56 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: First draft of the talk Message-ID: <596789fc.cd3f1c0a.1d99d.4e75@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5817:3e9c42adda30 Date: 2017-07-13 16:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/3e9c42adda30/ Log: First draft of the talk diff --git a/talk/ep2017/pypy-whats-new/Makefile b/talk/ep2017/pypy-whats-new/Makefile new file mode 100644 --- /dev/null +++ b/talk/ep2017/pypy-whats-new/Makefile @@ -0,0 +1,6 @@ +slides.pdf: slides.tex author.latex + pdflatex $< + +slides.tex: 
slides.rst + rst2beamer.py slides.rst > slides.tex + sed 's/\\date{}/\\input{author.latex}/' -i slides.tex || exit diff --git a/talk/ep2017/pypy-whats-new/author.latex b/talk/ep2017/pypy-whats-new/author.latex new file mode 100644 --- /dev/null +++ b/talk/ep2017/pypy-whats-new/author.latex @@ -0,0 +1,7 @@ +\definecolor{rrblitbackground}{rgb}{0.4, 0.0, 0.0} + +\title[What's New in PyPy]{PyPy meets Python 3 and Numpy (and other What's New topics)} +\author[Armin Rigo]{Armin Rigo} + +\institute{EuroPython 2017} +\date{July 2017} diff --git a/talk/ep2017/pypy-whats-new/graphs.png b/talk/ep2017/pypy-whats-new/graphs.png new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..9e5b0156179767c78b72f414147959bec8e982c0 GIT binary patch [cut] diff --git a/talk/ep2017/pypy-whats-new/slides.rst b/talk/ep2017/pypy-whats-new/slides.rst new file mode 100644 --- /dev/null +++ b/talk/ep2017/pypy-whats-new/slides.rst @@ -0,0 +1,245 @@ +=========================================================== +PyPy meets Python 3 and Numpy (and other What's New topics) +=========================================================== + + +What's New In PyPy +================== + +1. Python 3.5 + +2. Numpy, Cython, Pandas, ... + +3. pypy-stm? + +4. RevDB: reverse debugging + +5. Others... + + +What is PyPy +============ + +* PyPy is another implementation of Python + +* Supports only Python 2.7... up to this year + +* Comes with a JIT, good performance + +* Mostly, drop-in replacement + + + +PyPy 3.5 +============================================================ + +PyPy 3.5 +========== + +* Python 3.5 support released in **beta** + +* Drop-in replacement for ``python3.5`` + +* Thanks to Mozilla for funding this work! + + +PyPy 3.5 +========== + +* Async HTTP benchmarks: + +.. image:: graphs.png + :scale: 21% + + +PyPy 3.5 status +=============== + +* Roughly complete 3.5 support (plus f-strings!) 
+ +* Reasonably good performance + +* Tested mostly on Linux so far + +* Non-beta to be released "soon" (i.e. some time in 2017 I guess) + +* Python 3.6 to follow + + +Scientific stack +============================================================ + +Scientific stack +================ + +Numpy or Numpypy? + +* Numpy = the standard numpy library + +* Numpypy = our own partial reimplementation + + +Scientific stack +================ + +Numpy or Numpypy? + +* Numpy = the standard numpy library *(use this)* + +* Numpypy = our own partial reimplementation *(deprecated)* + + +Scientific stack +================ + +* Numpy works (99.9%) + +* On both PyPy 2.7 and PyPy 3.5 + +* The rest of the scientific stack mostly works too (Pandas etc.) + + +Cython, cpyext +================ + +* Cython mostly works + +* Any CPython C extension module mostly works + +* This is all thanks to ``cpyext``, our CPython C API emulation layer + + +Performance? +============== + +* Numpy/Pandas/etc. are all slow-ish at the Python-C boundary + +* Less so than last year + +* Complex algorithms written inside Numpy in C or Fortran have the same + speed, of course + + +Performance? +============== + +* Basically: try it out on your own code + + + +Software Transactional Memory +============================================================ + +Software Transactional Memory +============================= + +* ``pypy-stm``: getting rid of the Global Interpreter Lock + +* ...unfortunately, this approach does not seem to work :-( + + +Software Transactional Memory +============================= + +Unstable performance: + +* "Conflicts" between threads are hard to find + +* Fix one conflict, usually only to uncover the next one + +* As long as there is one, performance is bad + +* Continue developing the program, and you'll often reintroduce conflicts + + +PyPy-nogil? 
+=========== + +* Thinking instead about a GIL-free but non-STM PyPy + + + +Reverse Debugger +============================================================ + +Reverse Debugger +================ + +* The essential tool you need once a year + + +Reverse Debugger +================ + +* Debugger with the ability to go forward *and backward in time* + +* Watchpoints to know when a value changes when going in either + direction + +* http://bitbucket.org/pypy/revdb + + + +Others +============================================================ + +JIT improvements +====================== + +* Reductions in the warm-up time + +* Consumes less memory, too + + +VMProf +===================== + +* ``pip install vmprof`` + +* Works on CPython and on PyPy + +* A "good" high-performance profiler for Python code + +* The PyPy version shows the machine code generated by the JIT + + +CFFI improvements +======================= + +* CFFI: calling C from Python (from CPython or PyPy) + +* Biggest improvement of last year is *embedding* + +* Use CFFI to embed Python inside another program---much easier than + with the CPython C API, and works identically on CPython or PyPy too + + +Next year? +================================================================= + +Next year? +========== + +* Polish PyPy 3.5 / 3.6 + +* Polish Numpy and the scientific stack + +* Play with PyPy-nogil + +* Port RevDB to PyPy 3.5 + +. + + +Question & answers +================== + +* Polish PyPy 3.5 / 3.6 + +* Polish Numpy and the scientific stack + +* Play with PyPy-nogil + +* Port RevDB to PyPy 3.5 + +Thank you! 
PyPy main site: http://pypy.org/ From pypy.commits at gmail.com Fri Jul 14 04:50:37 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 14 Jul 2017 01:50:37 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: Updates Message-ID: <596885dd.848ddf0a.390e6.8f46@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5818:ec3ca5da8208 Date: 2017-07-14 10:50 +0200 http://bitbucket.org/pypy/extradoc/changeset/ec3ca5da8208/ Log: Updates diff --git a/talk/ep2017/pypy-whats-new/Makefile b/talk/ep2017/pypy-whats-new/Makefile --- a/talk/ep2017/pypy-whats-new/Makefile +++ b/talk/ep2017/pypy-whats-new/Makefile @@ -4,3 +4,4 @@ slides.tex: slides.rst rst2beamer.py slides.rst > slides.tex sed 's/\\date{}/\\input{author.latex}/' -i slides.tex || exit + sed 's/\\documentclass\[\]{beamer}/\\documentclass\[aspectratio=169\]{beamer}/' -i slides.tex || exit diff --git a/talk/ep2017/pypy-whats-new/slides.rst b/talk/ep2017/pypy-whats-new/slides.rst --- a/talk/ep2017/pypy-whats-new/slides.rst +++ b/talk/ep2017/pypy-whats-new/slides.rst @@ -22,13 +22,12 @@ * PyPy is another implementation of Python -* Supports only Python 2.7... up to this year +* Mostly, drop-in replacement + +* Supports Python 2.7, and almost Python 3.5 * Comes with a JIT, good performance -* Mostly, drop-in replacement - - PyPy 3.5 ============================================================ @@ -36,9 +35,13 @@ PyPy 3.5 ========== -* Python 3.5 support released in **beta** +* Python 3.5 support released in *gamma* -* Drop-in replacement for ``python3.5`` + - as stable (and mostly as fast) as PyPy 2.7, same JIT/GC/etc. + + - what could be wrong is a few details everywhere + + - please try it and *report issues!* * Thanks to Mozilla for funding this work! @@ -49,7 +52,7 @@ * Async HTTP benchmarks: .. image:: graphs.png - :scale: 21% + :scale: 26% PyPy 3.5 status @@ -61,7 +64,7 @@ * Tested mostly on Linux so far -* Non-beta to be released "soon" (i.e. 
some time in 2017 I guess) +* First "final" to be released soon (i.e. some time in 2017 I guess) * Python 3.6 to follow @@ -96,7 +99,8 @@ * On both PyPy 2.7 and PyPy 3.5 -* The rest of the scientific stack mostly works too (Pandas etc.) +* The rest of the scientific stack mostly works too (Jupyter, + Matplotlib, Pandas, etc.) Cython, cpyext @@ -104,9 +108,9 @@ * Cython mostly works -* Any CPython C extension module mostly works +* Actually, any CPython C extension module mostly works -* This is all thanks to ``cpyext``, our CPython C API emulation layer +* Thanks to ``cpyext``, our CPython C API emulation layer Performance? @@ -114,16 +118,25 @@ * Numpy/Pandas/etc. are all slow-ish at the Python-C boundary -* Less so than last year +* Less so than last year but still * Complex algorithms written inside Numpy in C or Fortran have the same speed, of course + - lots of ``ndarray[index]``: slow + + - one call to ``numpy.linalg.eig()``: fast + + - speed hack: ``p = ffi.cast("double *", ffi.from_buffer(ndarray))`` + Performance? ============== -* Basically: try it out on your own code +* We have plans to improve + +* For now: try it out on your own code and see + @@ -135,6 +148,10 @@ * ``pypy-stm``: getting rid of the Global Interpreter Lock + +Software Transactional Memory +============================= + * ...unfortunately, this approach does not seem to work :-( @@ -149,8 +166,12 @@ * As long as there is one, performance is bad +* You may fix enough to get good performance... but: + * Continue developing the program, and you'll often reintroduce conflicts +* (Also, hard to test for, reliably) + PyPy-nogil? 
=========== @@ -165,7 +186,7 @@ Reverse Debugger ================ -* The essential tool you need once a year +* RevDB: The essential tool you need once a year Reverse Debugger @@ -173,7 +194,7 @@ * Debugger with the ability to go forward *and backward in time* -* Watchpoints to know when a value changes when going in either +* Watchpoints to know when a value changes, while going in either direction * http://bitbucket.org/pypy/revdb @@ -194,19 +215,21 @@ VMProf ===================== +* A good high-performance profiler for Python code + * ``pip install vmprof`` * Works on CPython and on PyPy -* A "good" high-performance profiler for Python code - * The PyPy version shows the machine code generated by the JIT CFFI improvements ======================= -* CFFI: calling C from Python (from CPython or PyPy) +* CFFI: calling C from Python or the other way around + +* Works identically on CPython and on PyPy * Biggest improvement of last year is *embedding* From pypy.commits at gmail.com Fri Jul 14 05:10:00 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 14 Jul 2017 02:10:00 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: Commit pdf Message-ID: <59688a68.832d1c0a.9eddf.bf9f@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5819:a8103c0889c7 Date: 2017-07-14 11:09 +0200 http://bitbucket.org/pypy/extradoc/changeset/a8103c0889c7/ Log: Commit pdf diff --git a/talk/ep2017/pypy-whats-new/slides.pdf b/talk/ep2017/pypy-whats-new/slides.pdf new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..1ce3358401642402771f262e5dc9c7871b2655d9 GIT binary patch [cut] From pypy.commits at gmail.com Fri Jul 14 05:17:10 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 14 Jul 2017 02:17:10 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: updates Message-ID: <59688c16.ce8c1c0a.e022d.efd9@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5820:22d92b9985f8 Date: 2017-07-14 11:16 +0200 
http://bitbucket.org/pypy/extradoc/changeset/22d92b9985f8/ Log: updates diff --git a/talk/ep2017/pypy-whats-new/slides.pdf b/talk/ep2017/pypy-whats-new/slides.pdf index 1ce3358401642402771f262e5dc9c7871b2655d9..0d7f8340e7c9a2343678105a5add53ea3bcc7bd7 GIT binary patch [cut] diff --git a/talk/ep2017/pypy-whats-new/slides.rst b/talk/ep2017/pypy-whats-new/slides.rst --- a/talk/ep2017/pypy-whats-new/slides.rst +++ b/talk/ep2017/pypy-whats-new/slides.rst @@ -135,6 +135,8 @@ * We have plans to improve +* Funding help welcome + * For now: try it out on your own code and see @@ -178,6 +180,8 @@ * Thinking instead about a GIL-free but non-STM PyPy +* See lightning talk later + Reverse Debugger From pypy.commits at gmail.com Fri Jul 14 07:49:18 2017 From: pypy.commits at gmail.com (stevie_92) Date: Fri, 14 Jul 2017 04:49:18 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-gc-trialdeletion: Implemented flags for concurrent cycle deletion (Bacon and Rajan 2001) with overflow handling for refcount and removed unnecessary code Message-ID: <5968afbe.810f1c0a.4fad9.0fc7@mx.google.com> Author: Stefan Beyer Branch: cpyext-gc-trialdeletion Changeset: r91867:f8d7bb5f29b6 Date: 2017-07-14 13:47 +0200 http://bitbucket.org/pypy/pypy/changeset/f8d7bb5f29b6/ Log: Implemented flags for concurrent cycle deletion (Bacon and Rajan 2001) with overflow handling for refcount and removed unnecessary code diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -2,6 +2,7 @@ #define Py_OBJECT_H #include +#include #ifdef __cplusplus extern "C" { @@ -9,10 +10,17 @@ #include "cpyext_object.h" +int* foo; #define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t)-1)>>1)) #define PY_SSIZE_T_MIN (-PY_SSIZE_T_MAX-1) -#define Py_RETURN_NONE return Py_INCREF(Py_None), Py_None +#define PY_REFCNT_FROM_PYPY (4L << ((long)(log(PY_SSIZE_T_MAX) / log(2) - 2))) +#define PY_REFCNT_GREEN (4L << 
((long)(log(PY_SSIZE_T_MAX) / log(2) - 6))) +#define PY_REFCNT_OVERFLOW (1L << ((long)(log(PY_SSIZE_T_MAX) / log(2) - 6) / 2L - 1L)) +#define PY_REFCNT_MASK ((PY_REFCNT_OVERFLOW << 1L) - 1L) + +#define Py_RETURN_NONE return (((((PyObject *)(Py_None))->ob_refcnt & PY_REFCNT_OVERFLOW) == 0) ? \ + ((PyObject *)(Py_None))->ob_refcnt++ : Py_IncRef((PyObject *)(Py_None))), Py_None /* CPython has this for backwards compatibility with really old extensions, and now @@ -26,26 +34,34 @@ #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, -// #ifdef PYPY_DEBUG_REFCOUNT -// /* Slow version, but useful for debugging */ +#ifdef PYPY_DEBUG_REFCOUNT +/* Slow version, but useful for debugging */ #define Py_INCREF(ob) (Py_IncRef((PyObject *)(ob))) #define Py_DECREF(ob) (Py_DecRef((PyObject *)(ob))) #define Py_XINCREF(ob) (Py_IncRef((PyObject *)(ob))) #define Py_XDECREF(ob) (Py_DecRef((PyObject *)(ob))) -// #else -// /* Fast version */ -// #define Py_INCREF(ob) (((PyObject *)(ob))->ob_refcnt++) -// #define Py_DECREF(op) \ -// do { \ -// if (--((PyObject *)(op))->ob_refcnt != 0) \ -// ; \ -// else \ -// _Py_Dealloc((PyObject *)(op)); \ -// } while (0) - -// #define Py_XINCREF(op) do { if ((op) == NULL) ; else Py_INCREF(op); } while (0) -// #define Py_XDECREF(op) do { if ((op) == NULL) ; else Py_DECREF(op); } while (0) -// #endif +#else +/* Fast version */ +#define Py_INCREF(ob) \ + do { \ + if (!(((PyObject *)(ob))->ob_refcnt & PY_REFCNT_OVERFLOW)) \ + ((PyObject *)(ob))->ob_refcnt++; \ + else \ + Py_IncRef((PyObject *)(ob)); \ + } while (0) +#define Py_DECREF(ob) \ + do { \ + if (!(((PyObject *)(ob))->ob_refcnt & PY_REFCNT_GREEN) || \ + (((PyObject *)(ob))->ob_refcnt & PY_REFCNT_OVERFLOW)) \ + Py_DecRef((PyObject *)(ob)); \ + else if (--((PyObject *)(ob))->ob_refcnt & PY_REFCNT_MASK) \ + ; \ + else if (!((PyObject *)(ob))->ob_refcnt & PY_REFCNT_FROM_PYPY) \ + _Py_Dealloc((PyObject *)(ob)); \ + } while (0) +#define Py_XINCREF(op) do { if ((op) == NULL) ; else 
Py_INCREF(op); } while (0) +#define Py_XDECREF(op) do { if ((op) == NULL) ; else Py_DECREF(op); } while (0) +#endif #define Py_CLEAR(op) \ do { \ @@ -56,7 +72,8 @@ } \ } while (0) -#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) +#define Py_REFCNT(ob) ((((PyObject *)(ob))->ob_refcnt & PY_REFCNT_OVERFLOW == 0) ? \ + (((PyObject*)(ob))->ob_refcnt & PY_REFCNT_MASK) : _Py_RefCnt_Overflow(ob)) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -11,6 +11,7 @@ from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.error import OperationError, oefmt +from rpython.rlib.rawrefcount import REFCNT_MASK import pypy.module.__builtin__.operation as operation @@ -61,7 +62,7 @@ def _dealloc(space, obj): # This frees an object after its refcount dropped to zero, so we # assert that it is really zero here. 
- assert obj.c_ob_refcnt == 0 + assert obj.c_ob_refcnt & REFCNT_MASK == 0 pto = obj.c_ob_type obj_voidp = rffi.cast(rffi.VOIDP, obj) generic_cpy_call(space, pto.c_tp_free, obj_voidp) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -270,6 +270,27 @@ hop.exception_cannot_occur() return hop.inputconst(lltype.Bool, hop.s_result.const) +def _decref(pyobj): + if pyobj.c_ob_refcnt & rawrefcount.REFCNT_OVERFLOW == 0: + pyobj.c_ob_refcnt -= 1 + else: + if pyobj.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW: + pyobj.c_ob_refcnt -= 1 + elif rawrefcount.overflow_sub(pyobj): + pyobj.c_ob_refcnt -= 1 + +def _incref(pyobj): + if pyobj.c_ob_refcnt & rawrefcount.REFCNT_OVERFLOW == 0: + pyobj.c_ob_refcnt += 1 + else: + if pyobj.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW: + pyobj.c_ob_refcnt += 1 + rawrefcount.overflow_new(pyobj) + else: + rawrefcount.overflow_add(pyobj) + @specialize.ll() def make_ref(space, obj, w_userdata=None): """Increment the reference counter of the PyObject and return it. 
@@ -280,8 +301,7 @@ else: pyobj = as_pyobj(space, obj, w_userdata) if pyobj: - assert pyobj.c_ob_refcnt > 0 - pyobj.c_ob_refcnt += 1 + _incref(pyobj) if not is_pyobj(obj): keepalive_until_here(obj) return pyobj @@ -301,12 +321,10 @@ w_obj = obj pyobj = as_pyobj(space, w_obj) if pyobj: - pyobj.c_ob_refcnt -= 1 - assert pyobj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY + _decref(pyobj) keepalive_until_here(w_obj) return w_obj - @specialize.ll() def incref(space, obj): make_ref(space, obj) @@ -316,20 +334,33 @@ if is_pyobj(obj): obj = rffi.cast(PyObject, obj) if obj: - assert obj.c_ob_refcnt > 0 - obj.c_ob_refcnt -= 1 - if obj.c_ob_refcnt == 0 and \ + _decref(obj) + + if obj.c_ob_refcnt & rawrefcount.REFCNT_MASK == 0 and \ rawrefcount.get_trialdeletion_phase() != 1: - debug_print("dealloc", obj) - _Py_Dealloc(space, obj) - elif obj.c_ob_refcnt == rawrefcount.REFCNT_FROM_PYPY: - debug_print("dead", obj) - else: + if obj.c_ob_refcnt & rawrefcount.REFCNT_FROM_PYPY == 0: + _Py_Dealloc(space, obj) + elif obj.c_ob_refcnt & rawrefcount.REFCNT_CLR_GREEN == 0: if rawrefcount.get_trialdeletion_phase() == 0: trial_delete(space, obj) else: get_w_obj_and_decref(space, obj) + at specialize.ll() +def refcnt_overflow(space, obj): + if is_pyobj(obj): + pyobj = rffi.cast(PyObject, obj) + else: + pyobj = as_pyobj(space, obj, None) + if pyobj: + if (pyobj.c_ob_refcnt & rawrefcount.REFCNT_MASK == + rawrefcount.REFCNT_OVERFLOW): + return rawrefcount.REFCNT_OVERFLOW + else: + return (pyobj.c_ob_refcnt & rawrefcount.REFCNT_MASK) \ + + rawrefcount.overflow_get(pyobj) + return 0 + def traverse(space, obj, visit): from pypy.module.cpyext.api import generic_cpy_call if obj.c_ob_type and obj.c_ob_type.c_tp_traverse: @@ -343,7 +374,7 @@ @slot_function([PyObject, rffi.VOIDP], rffi.INT_real, error=-1) def visit_decref(space, obj, args): - obj.c_ob_refcnt = obj.c_ob_refcnt - 1 + _decref(obj) debug_print("visited dec", obj, "new refcnt", obj.c_ob_refcnt) if (obj not in rawrefcount.get_visited()): 
rawrefcount.add_visited(obj) @@ -353,7 +384,7 @@ @slot_function([PyObject, rffi.VOIDP], rffi.INT_real, error=-1) def visit_incref(space, obj, args): - obj.c_ob_refcnt = obj.c_ob_refcnt + 1 + _incref(obj) debug_print("visited inc", obj, "new refcnt", obj.c_ob_refcnt) if (obj not in rawrefcount.get_visited()): rawrefcount.add_visited(obj) @@ -364,6 +395,7 @@ @specialize.ll() def trial_delete(space, obj): if not obj.c_ob_type or not obj.c_ob_type.c_tp_traverse: + obj.c_ob_refcnt = obj.c_ob_refcnt | rawrefcount.REFCNT_CLR_GREEN return from pypy.module.cpyext.slotdefs import llslot @@ -427,6 +459,10 @@ def Py_DecRef(space, obj): decref(space, obj) + at cpython_api([PyObject], lltype.SignedLongLong, error=CANNOT_FAIL) +def _Py_RefCnt_Overflow(space, obj): + return refcnt_overflow(space, obj) + @cpython_api([PyObject], lltype.Void) def _Py_NewReference(space, obj): obj.c_ob_refcnt = 1 @@ -444,10 +480,6 @@ rawrefcount.mark_deallocating(w_marker_deallocating, obj) generic_cpy_call(space, pto.c_tp_dealloc, obj) - at cpython_api([PyObject], lltype.Void) -def _Py_Mark(space, obj): - rawrefcount.add_marked(obj) - @cpython_api([rffi.VOIDP], lltype.Signed, error=CANNOT_FAIL) def _Py_HashPointer(space, ptr): return rffi.cast(lltype.Signed, ptr) diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -1,5 +1,6 @@ # encoding: utf-8 import pytest +from rpython.rlib import rawrefcount from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError from pypy.module.cpyext.test.test_api import BaseApiTest, raises_w @@ -9,8 +10,10 @@ PyString_ConcatAndDel, PyString_Format, PyString_InternFromString, PyString_AsEncodedObject, PyString_AsDecodedObject, _PyString_Eq, _PyString_Join) -from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call -from pypy.module.cpyext.pyobject 
import Py_DecRef, from_ref, make_ref +from pypy.module.cpyext.api import ( + PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call) +from pypy.module.cpyext.pyobject import ( + Py_DecRef, Py_IncRef, _Py_RefCnt_Overflow, from_ref, make_ref) from pypy.module.cpyext.object import PyObject_AsCharBuffer from pypy.module.cpyext.api import PyTypeObjectPtr @@ -498,9 +501,9 @@ ref = make_ref(space, space.wrap('abc')) ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw') ptr[0] = ref - prev_refcnt = ref.c_ob_refcnt + prev_refcnt = ref.c_ob_refcnt & rawrefcount.REFCNT_MASK PyString_Concat(space, ptr, space.wrap('def')) - assert ref.c_ob_refcnt == prev_refcnt - 1 + assert ref.c_ob_refcnt & rawrefcount.REFCNT_MASK == prev_refcnt - 1 assert space.str_w(from_ref(space, ptr[0])) == 'abcdef' with pytest.raises(OperationError): PyString_Concat(space, ptr, space.w_None) @@ -536,9 +539,9 @@ w_text = space.wrap("text") ref = make_ref(space, w_text) - prev_refcnt = ref.c_ob_refcnt + prev_refcnt = ref.c_ob_refcnt & rawrefcount.REFCNT_MASK assert PyObject_AsCharBuffer(space, ref, bufp, lenp) == 0 - assert ref.c_ob_refcnt == prev_refcnt + assert ref.c_ob_refcnt & rawrefcount.REFCNT_MASK == prev_refcnt assert lenp[0] == 4 assert rffi.charp2str(bufp[0]) == 'text' lltype.free(bufp, flavor='raw') @@ -597,3 +600,53 @@ w_seq = space.wrap(['a', 'b']) w_joined = _PyString_Join(space, w_sep, w_seq) assert space.unwrap(w_joined) == 'ab' + + def test_refcnt_overflow(self, space): + ref1 = make_ref(space, space.wrap('foo')) + ref1.c_ob_refcnt = rawrefcount.REFCNT_OVERFLOW - 1 + + Py_IncRef(space, ref1) + assert ref1.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW + assert _Py_RefCnt_Overflow(space, ref1) \ + == rawrefcount.REFCNT_OVERFLOW + + Py_IncRef(space, ref1) + assert ref1.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW + 1 + assert _Py_RefCnt_Overflow(space, ref1) \ + == rawrefcount.REFCNT_OVERFLOW + 1 + + Py_IncRef(space, ref1) + assert 
ref1.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW + 1 + assert _Py_RefCnt_Overflow(space, ref1) \ + == rawrefcount.REFCNT_OVERFLOW + 2 + + Py_IncRef(space, ref1) + assert ref1.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW + 1 + assert _Py_RefCnt_Overflow(space, ref1) \ + == rawrefcount.REFCNT_OVERFLOW + 3 + + Py_DecRef(space, ref1) + assert ref1.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW + 1 + assert _Py_RefCnt_Overflow(space, ref1) \ + == rawrefcount.REFCNT_OVERFLOW + 2 + + Py_DecRef(space, ref1) + assert ref1.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW + 1 + assert _Py_RefCnt_Overflow(space, ref1) \ + == rawrefcount.REFCNT_OVERFLOW + 1 + + Py_DecRef(space, ref1) + assert ref1.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW + assert _Py_RefCnt_Overflow(space, ref1) \ + == rawrefcount.REFCNT_OVERFLOW + + Py_DecRef(space, ref1) + assert ref1.c_ob_refcnt & rawrefcount.REFCNT_MASK \ + == rawrefcount.REFCNT_OVERFLOW - 1 diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -4,7 +4,7 @@ # This is meant for pypy's cpyext module, but is a generally # useful interface over our GC. 
XXX "pypy" should be removed here # -import sys, weakref, py +import sys, weakref, py, math from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.objectmodel import we_are_translated, specialize, not_rpython from rpython.rtyper.extregistry import ExtRegistryEntry @@ -12,8 +12,37 @@ from rpython.rlib import rgc -REFCNT_FROM_PYPY = sys.maxint // 4 + 1 -REFCNT_FROM_PYPY_LIGHT = REFCNT_FROM_PYPY + (sys.maxint // 2 + 1) +MAX_BIT = int(math.log(sys.maxint, 2)) + +REFCNT_FROM_PYPY = 1 << MAX_BIT - 2 +REFCNT_FROM_PYPY_LIGHT = (1 << MAX_BIT - 1) + REFCNT_FROM_PYPY + +# Object either in Roots or Cycle Buffer (= Link-object exists) +REFCNT_CYCLE_BUFFERED = 1 << MAX_BIT - 3 + +# Offsets and sizes +REFCNT_CLR_OFFS = MAX_BIT - 6 +REFCNT_CRC_OFFS = REFCNT_CLR_OFFS / 2 +REFCNT_BITS = REFCNT_CRC_OFFS - 1 + +# Concurrent cycle collection colors +REFCNT_CLR_BLACK = 0 << REFCNT_CLR_OFFS # In use or free (Default) +REFCNT_CLR_GRAY = 1 << REFCNT_CLR_OFFS # Possible member of cycle +REFCNT_CLR_WHITE = 2 << REFCNT_CLR_OFFS # Member of garbage cycle +REFCNT_CLR_PURPLE = 3 << REFCNT_CLR_OFFS # Possible root of cycle +REFCNT_CLR_GREEN = 4 << REFCNT_CLR_OFFS # Acyclic +REFCNT_CLR_RED = 5 << REFCNT_CLR_OFFS # Cand cycle undergoing SIGMA-comp. 
+REFCNT_CLR_ORANGE = 6 << REFCNT_CLR_OFFS # Cand cycle awaiting epoch boundary + +# Cyclic reference count with overflow bit +REFCNT_CRC_OVERFLOW = 1 << REFCNT_CRC_OFFS + REFCNT_BITS +REFCNT_CRC_MASK = (1 << REFCNT_CRC_OFFS + REFCNT_BITS + 1) - 1 +REFCNT_CRC = 1 < REFCNT_CRC_OFFS + +# True reference count with overflow bit +REFCNT_OVERFLOW = 1 << REFCNT_BITS +REFCNT_MASK = (1 << REFCNT_BITS + 1) - 1 + RAWREFCOUNT_DEALLOC_TRIGGER = lltype.Ptr(lltype.FuncType([], lltype.Void)) @@ -24,25 +53,40 @@ return res _trial_deletion_phase = 0 -_visited = [] -_marked = [] def set_trialdeletion_phase(value): _trial_deletion_phase = value def get_trialdeletion_phase(): return _trial_deletion_phase + +_visited = [] + def add_visited(obj): _visited.append(obj) def get_visited(): return _visited def clear_visited(): del _visited[:] -def add_marked(obj): - _marked.append(obj) -def get_marked(): - return marked -def clear_marked(): - del _marked[:] + +_refcount_overflow = dict() + +# TODO: if object moves, address changes! 
+def overflow_new(obj): + _refcount_overflow[id(obj)] = 0 +def overflow_add(obj): + _refcount_overflow[id(obj)] += 1 +def overflow_sub(obj): + c = _refcount_overflow[id(obj)] + if c > 0: + _refcount_overflow[id(obj)] = c - 1 + return False + else: + _refcount_overflow.pop(id(obj)) + return True +def overflow_get(obj): + return _refcount_overflow[id(obj)] + +# TODO: _cyclic_refcount_overflow = dict() @not_rpython def init(dealloc_trigger_callback=None): @@ -142,7 +186,8 @@ wr_p_list = [] new_p_list = [] for ob in reversed(_p_list): - if ob.c_ob_refcnt not in (REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_LIGHT): + if ob.c_ob_refcnt & REFCNT_MASK > 0 \ + or ob.c_ob_refcnt & REFCNT_FROM_PYPY == 0: new_p_list.append(ob) else: p = detach(ob, wr_p_list) @@ -175,7 +220,8 @@ if ob.c_ob_refcnt >= REFCNT_FROM_PYPY_LIGHT: ob.c_ob_refcnt -= REFCNT_FROM_PYPY_LIGHT ob.c_ob_pypy_link = 0 - if ob.c_ob_refcnt == 0: + if ob.c_ob_refcnt & REFCNT_MASK == 0 \ + and ob.c_ob_refcnt < REFCNT_FROM_PYPY: lltype.free(ob, flavor='raw', track_allocation=track_allocation) else: @@ -183,8 +229,9 @@ assert ob.c_ob_refcnt < int(REFCNT_FROM_PYPY_LIGHT * 0.99) ob.c_ob_refcnt -= REFCNT_FROM_PYPY ob.c_ob_pypy_link = 0 - if ob.c_ob_refcnt == 0: - ob.c_ob_refcnt = 1 + if ob.c_ob_refcnt & REFCNT_MASK == 0 \ + and ob.c_ob_refcnt < REFCNT_FROM_PYPY: + ob.c_ob_refcnt += 1 _d_list.append(ob) return None From pypy.commits at gmail.com Fri Jul 14 12:08:59 2017 From: pypy.commits at gmail.com (danchr) Date: Fri, 14 Jul 2017 09:08:59 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-mac-embedding: fix building against latest LibreSSL Message-ID: <5968ec9b.ba85df0a.ca01f.d72b@mx.google.com> Author: Dan Villiom Podlaski Christiansen Branch: py3.5-mac-embedding Changeset: r91869:47576a3f87dc Date: 2017-07-14 17:59 +0200 http://bitbucket.org/pypy/pypy/changeset/47576a3f87dc/ Log: fix building against latest LibreSSL diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/cryptography.py 
b/lib_pypy/_cffi_ssl/_cffi_src/openssl/cryptography.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/cryptography.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/cryptography.py @@ -48,6 +48,9 @@ #else #define CRYPTOGRAPHY_IS_LIBRESSL 0 #endif + +#define CRYPTOGRAPHY_LIBRESSL_251_OR_GREATER \ + (CRYPTOGRAPHY_IS_LIBRESSL && LIBRESSL_VERSION_NUMBER >= 0x20501000) """ TYPES = """ diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py @@ -221,10 +221,12 @@ static const long X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM = 0; static const long X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED = 0; static const long X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 = 0; +#if !CRYPTOGRAPHY_LIBRESSL_251_OR_GREATER static const long X509_V_ERR_HOSTNAME_MISMATCH = 0; static const long X509_V_ERR_EMAIL_MISMATCH = 0; static const long X509_V_ERR_IP_ADDRESS_MISMATCH = 0; #endif +#endif /* OpenSSL 1.0.2beta2+ verification parameters */ #if CRYPTOGRAPHY_OPENSSL_102BETA2_OR_GREATER && \ @@ -238,6 +240,14 @@ static const long X509_V_FLAG_SUITEB_192_LOS = 0; static const long X509_V_FLAG_SUITEB_128_LOS = 0; +#if CRYPTOGRAPHY_LIBRESSL_251_OR_GREATER +int X509_VERIFY_PARAM_set1_host(X509_VERIFY_PARAM *, const char *, size_t); +int X509_VERIFY_PARAM_set1_email(X509_VERIFY_PARAM *, const char *, size_t); +int X509_VERIFY_PARAM_set1_ip(X509_VERIFY_PARAM *, const unsigned char *, + size_t); +int X509_VERIFY_PARAM_set1_ip_asc(X509_VERIFY_PARAM *, const char *); +void X509_VERIFY_PARAM_set_hostflags(X509_VERIFY_PARAM *, unsigned int); +#else int (*X509_VERIFY_PARAM_set1_host)(X509_VERIFY_PARAM *, const char *, size_t) = NULL; int (*X509_VERIFY_PARAM_set1_email)(X509_VERIFY_PARAM *, const char *, @@ -248,6 +258,7 @@ void (*X509_VERIFY_PARAM_set_hostflags)(X509_VERIFY_PARAM *, unsigned int) = NULL; #endif +#endif /* OpenSSL 1.0.2+ or Solaris's 
backport */ #ifdef X509_V_FLAG_PARTIAL_CHAIN From pypy.commits at gmail.com Fri Jul 14 12:09:01 2017 From: pypy.commits at gmail.com (danchr) Date: Fri, 14 Jul 2017 09:09:01 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-mac-embedding: add support to package.py for embedding libraries not included with OS X Message-ID: <5968ec9d.090b1c0a.fecf.97b7@mx.google.com> Author: Dan Villiom Podlaski Christiansen Branch: py3.5-mac-embedding Changeset: r91870:ed548e40171b Date: 2017-07-14 17:52 +0200 http://bitbucket.org/pypy/pypy/changeset/ed548e40171b/ Log: add support to package.py for embedding libraries not included with OS X diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -58,6 +58,7 @@ ^rpython/rlib/rvmprof/src/shared/libbacktrace/config.h$ ^rpython/rlib/rvmprof/src/shared/libbacktrace/config.log$ ^rpython/rlib/rvmprof/src/shared/libbacktrace/config.status$ +^pypy/tool/dest$ ^pypy/goal/pypy-translation-snapshot$ ^pypy/goal/pypy-c ^pypy/goal/pypy3-c diff --git a/pypy/tool/build_cffi_imports.py b/pypy/tool/build_cffi_imports.py --- a/pypy/tool/build_cffi_imports.py +++ b/pypy/tool/build_cffi_imports.py @@ -22,12 +22,98 @@ "xx": None, # for testing: 'None' should be completely ignored } -def create_cffi_import_libraries(pypy_c, options, basedir, only=None): +# for distribution, we may want to fetch dependencies not provided by +# the OS, such as a recent openssl/libressl or liblzma/xz. 
+cffi_dependencies = { + 'lzma': ('https://tukaani.org/xz/xz-5.2.3.tar.gz', []), + 'ssl': ('http://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.6.0.tar.gz', + ['--without-openssldir']), + '_gdbm': ('ftp://ftp.gnu.org/gnu/gdbm/gdbm-1.13.tar.gz', + ['--without-readline']), +} + + +def _unpack_tarfile(filename, extract_dir): + """Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir` + """ + import tarfile # late import for breaking circular dependency + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir) + finally: + tarobj.close() + + +def _build_dependency(name, destdir): + import multiprocessing + import shutil + import subprocess + + try: + from urllib.request import urlretrieve + except ImportError: + from urllib import urlretrieve + + try: + url, args = cffi_dependencies[name] + except KeyError: + return 0, None, None + + archive = os.path.join(destdir, url.rsplit('/', 1)[-1]) + + # next, fetch the archive to disk, if needed + if not os.path.exists(archive): + urlretrieve(url, archive) + + # extract the archive into our destination directory + _unpack_tarfile(archive, destdir) + + # configure & build it + sources = os.path.join( + destdir, + os.path.basename(archive)[:-7], + ) + + from rpython.tool.runsubprocess import run_subprocess + + status, stdout, stderr = run_subprocess( + './configure', + [ + '--prefix=/usr', + '--disable-shared', + '--enable-silent-rules', + '--disable-dependency-tracking', + ] + args, + cwd=sources, + ) + + if status != 0: + return status, stdout, stderr + + status, stdout, stderr = run_subprocess( + 'make', + [ + '-s', '-j' + str(multiprocessing.cpu_count() + 1), + 'install', 'DESTDIR={}/'.format(destdir), + ], + cwd=sources, + ) + + return status, stdout, stderr + + +def create_cffi_import_libraries(pypy_c, options, basedir, only=None, + embed_dependencies=False): from 
rpython.tool.runsubprocess import run_subprocess shutil.rmtree(str(join(basedir,'lib_pypy','__pycache__')), ignore_errors=True) failures = [] + for key, module in sorted(cffi_build_scripts.items()): if only and key not in only: print("* SKIPPING", key, '(not specified in --only)') @@ -40,9 +126,33 @@ else: args = ['-c', 'import ' + module] cwd = None + env = os.environ.copy() + print('*', ' '.join(args), file=sys.stderr) + if embed_dependencies: + destdir = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'dest') + shutil.rmtree(destdir, ignore_errors=True) + os.makedirs(destdir) + + status, stdout, stderr = _build_dependency(key, destdir) + + if status != 0: + failures.append((key, module)) + print("stdout:") + print(stdout.decode('utf-8')) + print("stderr:") + print(stderr.decode('utf-8')) + continue + + env['CPPFLAGS'] = \ + '-I{}/usr/include {}'.format(destdir, env.get('CPPFLAGS', '')) + env['LDFLAGS'] = \ + '-L{}/usr/lib {}'.format(destdir, env.get('LDFLAGS', '')) + try: - status, stdout, stderr = run_subprocess(str(pypy_c), args, cwd=cwd) + status, stdout, stderr = run_subprocess(str(pypy_c), args, + cwd=cwd, env=env) if status != 0: failures.append((key, module)) print("stdout:") @@ -73,6 +183,8 @@ ' you can specify an alternative pypy vm here') parser.add_argument('--only', dest='only', default=None, help='Only build the modules delimited by a colon. E.g. 
ssl,sqlite') + parser.add_argument('--embed-dependencies', dest='embed_dependencies', action='store_true', + help='embed dependencies for distribution') args = parser.parse_args() exename = join(os.getcwd(), args.exefile) @@ -89,7 +201,8 @@ only = None else: only = set(args.only.split(',')) - failures = create_cffi_import_libraries(exename, options, basedir, only=only) + failures = create_cffi_import_libraries(exename, options, basedir, only=only, + embed_dependencies=args.embed_dependencies) if len(failures) > 0: print('*** failed to build the CFFI modules %r' % ( [f[1] for f in failures],), file=sys.stderr) diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -83,7 +83,11 @@ if not _fake and not pypy_runs(pypy_c): raise OSError("Running %r failed!" % (str(pypy_c),)) if not options.no_cffi: - failures = create_cffi_import_libraries(str(pypy_c), options, str(basedir)) + failures = create_cffi_import_libraries( + str(pypy_c), options, str(basedir), + embed_dependencies=options.embed_dependencies, + ) + for key, module in failures: print >>sys.stderr, """!!!!!!!!!!\nBuilding {0} bindings failed. 
You can either install development headers package, @@ -296,12 +300,17 @@ help='destination dir for archive') parser.add_argument('--override_pypy_c', type=str, default='', help='use as pypy3 exe instead of pypy/goal/pypy3-c') + parser.add_argument('--embedded-dependencies', dest='embed_dependencies', + action='store_true', + help='embed dependencies for distribution') options = parser.parse_args(args) if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): options.nostrip = True if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): options.no_tk = True + if os.environ.has_key("PYPY_EMBED_DEPENDENCIES"): + options.embed_dependencies = True if not options.builddir: # The import actually creates the udir directory from rpython.tool.udir import udir From pypy.commits at gmail.com Sat Jul 15 04:50:30 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 15 Jul 2017 01:50:30 -0700 (PDT) Subject: [pypy-commit] pypy default: (ronan, arigo) Message-ID: <5969d756.ba85df0a.ca01f.da98@mx.google.com> Author: Armin Rigo Branch: Changeset: r91871:20f7723ff895 Date: 2017-07-15 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/20f7723ff895/ Log: (ronan, arigo) Issue #2604: skip this test (fails on some Linux because pypy uses the new getrandom() syscall) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -580,6 +580,7 @@ "getentropy() does not use a file descriptor") class URandomFDTests(unittest.TestCase): @unittest.skipUnless(resource, "test requires the resource module") + @test_support.impl_detail(pypy=False) # on Linux, may use getrandom() def test_urandom_failure(self): # Check urandom() failing when it is not able to open /dev/random. 
# We spawn a new process to make the test more robust (if getrlimit() diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -34,6 +34,7 @@ from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.util import get_positive_index, negate +from pypy.objspace.std.noneobject import W_NoneObject __all__ = ['W_ListObject', 'make_range_list', 'make_empty_list_with_size'] @@ -121,6 +122,8 @@ elif type(w_obj) is W_FloatObject: if longlong2float.can_encode_float(space.float_w(w_obj)): continue # ok + elif type(w_obj) is W_NoneObject: + continue # ok break else: return space.fromcache(IntOrFloatListStrategy) @@ -1728,7 +1731,8 @@ return True def switch_to_next_strategy(self, w_list, w_sample_item): - if type(w_sample_item) is W_FloatObject: + if (type(w_sample_item) is W_FloatObject or + type(w_sample_item) is W_NoneObject): if self.switch_to_int_or_float_strategy(w_list): # yes, we can switch to IntOrFloatListStrategy # (ignore here the extremely unlikely case where @@ -1838,12 +1842,16 @@ return True def switch_to_next_strategy(self, w_list, w_sample_item): + try_switch = False if type(w_sample_item) is W_IntObject: sample_intval = self.space.int_w(w_sample_item) if longlong2float.can_encode_int32(sample_intval): - if self.switch_to_int_or_float_strategy(w_list): - # yes, we can switch to IntOrFloatListStrategy - return + try_switch = True + elif type(w_sample_item) is W_NoneObject: + try_switch = True + if try_switch and self.switch_to_int_or_float_strategy(w_list): + # yes, we can switch to IntOrFloatListStrategy + return # no, fall back to ObjectListStrategy w_list.switch_to_object_strategy() @@ -1854,6 +1862,8 @@ _none_value = longlong2float.float2longlong(0.0) def wrap(self, llval): + if llval == longlong2float.nan_encoded_none: + return self.space.w_None if 
longlong2float.is_int32_from_longlong_nan(llval): intval = longlong2float.decode_int32_from_longlong_nan(llval) return self.space.newint(intval) @@ -1865,6 +1875,8 @@ if type(w_int_or_float) is W_IntObject: intval = self.space.int_w(w_int_or_float) return longlong2float.encode_int32_into_longlong_nan(intval) + elif type(w_int_or_float) is W_NoneObject: + return longlong2float.nan_encoded_none else: floatval = self.space.float_w(w_int_or_float) return longlong2float.float2longlong(floatval) @@ -1880,6 +1892,8 @@ elif type(w_obj) is W_FloatObject: floatval = self.space.float_w(w_obj) return longlong2float.can_encode_float(floatval) + elif type(w_obj) is W_NoneObject: + return True else: return False diff --git a/rpython/rlib/longlong2float.py b/rpython/rlib/longlong2float.py --- a/rpython/rlib/longlong2float.py +++ b/rpython/rlib/longlong2float.py @@ -104,10 +104,12 @@ # ____________________________________________________________ -# For encoding integers inside nonstandard NaN bit patterns. +# For encoding integers or none inside nonstandard NaN bit patterns. # ff ff ff fe xx xx xx xx (signed 32-bit int) +# ff ff ff ff ff ff ff ac (none) nan_high_word_int32 = -2 # -2 == (int)0xfffffffe nan_encoded_zero = r_int64(nan_high_word_int32 << 32) +nan_encoded_none = r_int64(-84) def encode_int32_into_longlong_nan(value): return (nan_encoded_zero + @@ -127,7 +129,9 @@ return value == rffi.cast(lltype.Signed, rffi.cast(rffi.INT, value)) def can_encode_float(value): - return intmask(float2longlong(value) >> 32) != nan_high_word_int32 + return intmask(float2longlong(value) >> 33) != -1 +assert (nan_high_word_int32 >> 1) == -1 +assert (nan_encoded_none >> 33) == -1 def maybe_decode_longlong_as_float(value): # Decode a longlong value. If a float, just return it as a float. 
From pypy.commits at gmail.com Sat Jul 15 04:52:09 2017 From: pypy.commits at gmail.com (rlamy) Date: Sat, 15 Jul 2017 01:52:09 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Revert opcode.py to the 3.5 version until we implement the new bytecodes Message-ID: <5969d7b9.84e31c0a.e6019.c934@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r91872:fef10d5acba0 Date: 2017-07-15 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/fef10d5acba0/ Log: Revert opcode.py to the 3.5 version until we implement the new bytecodes diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py --- a/lib-python/3/opcode.py +++ b/lib-python/3/opcode.py @@ -31,10 +31,12 @@ haslocal = [] hascompare = [] hasfree = [] -hasnargs = [] # unused +hasnargs = [] opmap = {} -opname = ['<%r>' % (op,) for op in range(256)] +opname = [''] * 256 +for op in range(256): opname[op] = '<%r>' % (op,) +del op def def_op(name, op): opname[op] = name @@ -119,7 +121,7 @@ def_op('RETURN_VALUE', 83) def_op('IMPORT_STAR', 84) -def_op('SETUP_ANNOTATIONS', 85) + def_op('YIELD_VALUE', 86) def_op('POP_BLOCK', 87) def_op('END_FINALLY', 88) @@ -169,12 +171,13 @@ haslocal.append(125) def_op('DELETE_FAST', 126) # Local variable number haslocal.append(126) -name_op('STORE_ANNOTATION', 127) # Index in name list def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3) -def_op('CALL_FUNCTION', 131) # #args -def_op('MAKE_FUNCTION', 132) # Flags +def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8) +hasnargs.append(131) +def_op('MAKE_FUNCTION', 132) # Number of args with default values def_op('BUILD_SLICE', 133) # Number of items +def_op('MAKE_CLOSURE', 134) def_op('LOAD_CLOSURE', 135) hasfree.append(135) def_op('LOAD_DEREF', 136) @@ -184,8 +187,12 @@ def_op('DELETE_DEREF', 138) hasfree.append(138) -def_op('CALL_FUNCTION_KW', 141) # #args + #kwargs -def_op('CALL_FUNCTION_EX', 142) # Flags +def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8) +hasnargs.append(140) +def_op('CALL_FUNCTION_KW', 141) # 
#args + (#kwargs << 8) +hasnargs.append(141) +def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8) +hasnargs.append(142) jrel_op('SETUP_WITH', 143) @@ -196,6 +203,8 @@ def_op('LOAD_CLASSDEREF', 148) hasfree.append(148) +jrel_op('SETUP_ASYNC_WITH', 154) + def_op('EXTENDED_ARG', 144) EXTENDED_ARG = 144 @@ -205,12 +214,8 @@ def_op('BUILD_TUPLE_UNPACK', 152) def_op('BUILD_SET_UNPACK', 153) -jrel_op('SETUP_ASYNC_WITH', 154) - -def_op('FORMAT_VALUE', 155) -def_op('BUILD_CONST_KEY_MAP', 156) -def_op('BUILD_STRING', 157) -def_op('BUILD_TUPLE_UNPACK_WITH_CALL', 158) +def_op('FORMAT_VALUE', 155) # in CPython 3.6, but available in PyPy from 3.5 +def_op('BUILD_STRING', 157) # in CPython 3.6, but available in PyPy from 3.5 # pypy modification, experimental bytecode def_op('LOOKUP_METHOD', 201) # Index in name list From pypy.commits at gmail.com Sat Jul 15 06:36:45 2017 From: pypy.commits at gmail.com (rmariano) Date: Sat, 15 Jul 2017 03:36:45 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Add posix.fspath Message-ID: <5969f03d.898adf0a.be359.5088@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91873:5041f795bfb2 Date: 2017-07-15 12:34 +0200 http://bitbucket.org/pypy/pypy/changeset/5041f795bfb2/ Log: Add posix.fspath Add fspath to posix, so it's available in os.fspath. 
New feature added in CPython 3.6: https://docs.python.org/3/library/os.html#os.fspath diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -83,6 +83,7 @@ 'scandir': 'interp_scandir.scandir', 'get_inheritable': 'interp_posix.get_inheritable', 'set_inheritable': 'interp_posix.set_inheritable', + 'fspath': 'interp_posix.fspath', } if hasattr(os, 'chown'): @@ -228,7 +229,7 @@ 'POSIX_FADV_RANDOM', 'POSIX_FADV_NOREUSE', 'POSIX_FADV_DONTNEED']: assert getattr(rposix, _name) is not None, "missing %r" % (_name,) interpleveldefs[_name] = 'space.wrap(%d)' % getattr(rposix, _name) - + if hasattr(rposix, 'sched_get_priority_max'): interpleveldefs['sched_get_priority_max'] = 'interp_posix.sched_get_priority_max' interpleveldefs['sched_get_priority_min'] = 'interp_posix.sched_get_priority_min' diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -2437,8 +2437,8 @@ @unwrap_spec(policy=int) def sched_get_priority_max(space, policy): - """returns the maximum priority value that - can be used with the scheduling algorithm + """returns the maximum priority value that + can be used with the scheduling algorithm identified by policy """ while True: @@ -2452,7 +2452,7 @@ @unwrap_spec(policy=int) def sched_get_priority_min(space, policy): """returns the minimum priority value that - can be used with the scheduling algorithm + can be used with the scheduling algorithm identified by policy """ while True: @@ -2462,3 +2462,36 @@ wrap_oserror(space, e, eintr_retry=True) else: return space.newint(s) + + +def fspath(space, w_path): + """ + Return the file system path representation of the object. + + If the object is str or bytes, then allow it to pass through as-is. If the + object defines __fspath__(), then return the result of that method. All other + types raise a TypeError. 
+ """ + if (space.isinstance_w(w_path, space.w_text) or + space.isinstance_w(w_path, space.w_bytes)): + return w_path + + w_fspath_method = space.lookup(w_path, '__fspath__') + if w_fspath_method is None: + raise oefmt( + space.w_TypeError, + 'expected str, bytes or os.PathLike object, not %T', + w_path + ) + + w_result = space.get_and_call_function(w_fspath_method, w_path) + if (space.isinstance_w(w_result, space.w_text) or + space.isinstance_w(w_result, space.w_bytes)): + return w_result + + raise oefmt( + space.w_TypeError, + 'expected %T.__fspath__() to return str or bytes, not %T', + w_path, + w_result + ) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -964,7 +964,7 @@ assert posix.sched_get_priority_min(posix.SCHED_OTHER) != -1 if getattr(posix, 'SCHED_BATCH', None): assert posix.sched_get_priority_min(posix.SCHED_BATCH) != -1 - + if hasattr(rposix, 'sched_get_priority_min'): def test_os_sched_priority_max_greater_than_min(self): posix, os = self.posix, self.os @@ -1411,6 +1411,32 @@ e = raises(OSError, self.posix.symlink, 'bok', '/nonexistentdir/boz') assert str(e.value).endswith(": 'bok' -> '/nonexistentdir/boz'") + def test_os_fspath(self): + assert hasattr(self.posix, 'fspath') + raises(TypeError, self.posix.fspath, None) + e = raises(TypeError, self.posix.fspath, 42) + assert str(e.value).endswith('int') + string = 'string' + assert self.posix.fspath(string) == string + assert self.posix.fspath(b'bytes') == b'bytes' + class Sample: + def __fspath__(self): + return 'sample' + + assert self.posix.fspath(Sample()) == 'sample' + + class BSample: + def __fspath__(self): + return b'binary sample' + + assert self.posix.fspath(BSample()) == b'binary sample' + + class WrongSample: + def __fspath__(self): + return 4 + + raises(TypeError, self.posix.fspath, WrongSample()) + class AppTestEnvironment(object): def setup_class(cls): 
From pypy.commits at gmail.com Sat Jul 15 06:42:42 2017 From: pypy.commits at gmail.com (rmariano) Date: Sat, 15 Jul 2017 03:42:42 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Comment out async generators temporarily Message-ID: <5969f1a2.89e51c0a.6c323.827a@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91874:b3e217ab4fd8 Date: 2017-07-15 12:40 +0200 http://bitbucket.org/pypy/pypy/changeset/b3e217ab4fd8/ Log: Comment out async generators temporarily This is not yet supported (added in Python 3.6), so at the moment it's failing other unrelated parts of the code. Should be enabled again, once it's supported. diff --git a/lib-python/3/_collections_abc.py b/lib-python/3/_collections_abc.py --- a/lib-python/3/_collections_abc.py +++ b/lib-python/3/_collections_abc.py @@ -61,10 +61,12 @@ _coro.close() # Prevent ResourceWarning del _coro ## asynchronous generator ## -async def _ag(): yield -_ag = _ag() -async_generator = type(_ag) -del _ag +## This should be reverted, once async generators are supported. +## Temporary fix. +#async def _ag(): yield +#_ag = _ag() +#async_generator = type(_ag) +#del _ag ### ONE-TRICK PONIES ### @@ -237,7 +239,7 @@ return NotImplemented -AsyncGenerator.register(async_generator) +# AsyncGenerator.register(async_generator) class Iterable(metaclass=ABCMeta): From pypy.commits at gmail.com Sat Jul 15 06:42:45 2017 From: pypy.commits at gmail.com (rmariano) Date: Sat, 15 Jul 2017 03:42:45 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Basic support for getfilesystemencodeerrors Message-ID: <5969f1a5.a29adf0a.2581e.8893@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91875:095ebf05a700 Date: 2017-07-15 12:41 +0200 http://bitbucket.org/pypy/pypy/changeset/095ebf05a700/ Log: Basic support for getfilesystemencodeerrors Added in Python 3.6: https://docs.python.org/3/library/sys.html#sys. getfilesystemencodeerrors Basic default support, so it doesn't fail with basic checks. Requires proper implementation later on. 
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -87,6 +87,7 @@ 'getdefaultencoding' : 'interp_encoding.getdefaultencoding', 'getfilesystemencoding' : 'interp_encoding.getfilesystemencoding', + 'getfilesystemencodeerrors': 'interp_encoding.getfilesystemencodeerrors', 'float_info' : 'system.get_float_info(space)', 'int_info' : 'system.get_int_info(space)', diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -48,3 +48,7 @@ if space.sys.filesystemencoding is None: return space.newtext(base_encoding) return space.newtext(space.sys.filesystemencoding) + + +def getfilesystemencodeerrors(space): + return space.newtext('surrogateescape') From pypy.commits at gmail.com Sat Jul 15 06:58:46 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 15 Jul 2017 03:58:46 -0700 (PDT) Subject: [pypy-commit] pypy default: Ooops, backout these changes that are not ready Message-ID: <5969f566.01571c0a.a2bad.b6a6@mx.google.com> Author: Armin Rigo Branch: Changeset: r91876:68a1a6af2bf4 Date: 2017-07-15 12:58 +0200 http://bitbucket.org/pypy/pypy/changeset/68a1a6af2bf4/ Log: Ooops, backout these changes that are not ready diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -34,7 +34,6 @@ from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.util import get_positive_index, negate -from pypy.objspace.std.noneobject import W_NoneObject __all__ = ['W_ListObject', 'make_range_list', 'make_empty_list_with_size'] @@ -122,8 +121,6 @@ elif type(w_obj) is W_FloatObject: if longlong2float.can_encode_float(space.float_w(w_obj)): continue # ok - elif type(w_obj) is W_NoneObject: - continue # ok break 
else: return space.fromcache(IntOrFloatListStrategy) @@ -1731,8 +1728,7 @@ return True def switch_to_next_strategy(self, w_list, w_sample_item): - if (type(w_sample_item) is W_FloatObject or - type(w_sample_item) is W_NoneObject): + if type(w_sample_item) is W_FloatObject: if self.switch_to_int_or_float_strategy(w_list): # yes, we can switch to IntOrFloatListStrategy # (ignore here the extremely unlikely case where @@ -1842,16 +1838,12 @@ return True def switch_to_next_strategy(self, w_list, w_sample_item): - try_switch = False if type(w_sample_item) is W_IntObject: sample_intval = self.space.int_w(w_sample_item) if longlong2float.can_encode_int32(sample_intval): - try_switch = True - elif type(w_sample_item) is W_NoneObject: - try_switch = True - if try_switch and self.switch_to_int_or_float_strategy(w_list): - # yes, we can switch to IntOrFloatListStrategy - return + if self.switch_to_int_or_float_strategy(w_list): + # yes, we can switch to IntOrFloatListStrategy + return # no, fall back to ObjectListStrategy w_list.switch_to_object_strategy() @@ -1862,8 +1854,6 @@ _none_value = longlong2float.float2longlong(0.0) def wrap(self, llval): - if llval == longlong2float.nan_encoded_none: - return self.space.w_None if longlong2float.is_int32_from_longlong_nan(llval): intval = longlong2float.decode_int32_from_longlong_nan(llval) return self.space.newint(intval) @@ -1875,8 +1865,6 @@ if type(w_int_or_float) is W_IntObject: intval = self.space.int_w(w_int_or_float) return longlong2float.encode_int32_into_longlong_nan(intval) - elif type(w_int_or_float) is W_NoneObject: - return longlong2float.nan_encoded_none else: floatval = self.space.float_w(w_int_or_float) return longlong2float.float2longlong(floatval) @@ -1892,8 +1880,6 @@ elif type(w_obj) is W_FloatObject: floatval = self.space.float_w(w_obj) return longlong2float.can_encode_float(floatval) - elif type(w_obj) is W_NoneObject: - return True else: return False diff --git a/rpython/rlib/longlong2float.py 
b/rpython/rlib/longlong2float.py --- a/rpython/rlib/longlong2float.py +++ b/rpython/rlib/longlong2float.py @@ -104,12 +104,10 @@ # ____________________________________________________________ -# For encoding integers or none inside nonstandard NaN bit patterns. +# For encoding integers inside nonstandard NaN bit patterns. # ff ff ff fe xx xx xx xx (signed 32-bit int) -# ff ff ff ff ff ff ff ac (none) nan_high_word_int32 = -2 # -2 == (int)0xfffffffe nan_encoded_zero = r_int64(nan_high_word_int32 << 32) -nan_encoded_none = r_int64(-84) def encode_int32_into_longlong_nan(value): return (nan_encoded_zero + @@ -129,9 +127,7 @@ return value == rffi.cast(lltype.Signed, rffi.cast(rffi.INT, value)) def can_encode_float(value): - return intmask(float2longlong(value) >> 33) != -1 -assert (nan_high_word_int32 >> 1) == -1 -assert (nan_encoded_none >> 33) == -1 + return intmask(float2longlong(value) >> 32) != nan_high_word_int32 def maybe_decode_longlong_as_float(value): # Decode a longlong value. If a float, just return it as a float. From pypy.commits at gmail.com Sat Jul 15 09:20:00 2017 From: pypy.commits at gmail.com (rmariano) Date: Sat, 15 Jul 2017 06:20:00 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Add ModuleNotFoundError Message-ID: <596a1680.01571c0a.a2bad.de2e@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91877:5a01e5b5546a Date: 2017-07-15 15:18 +0200 http://bitbucket.org/pypy/pypy/changeset/5a01e5b5546a/ Log: Add ModuleNotFoundError New exception added in Python 3.6. It's a subclass of ImportError. 
https://docs.python.org/3/library/exceptions.html#ModuleNotFoundError diff --git a/pypy/module/exceptions/__init__.py b/pypy/module/exceptions/__init__.py --- a/pypy/module/exceptions/__init__.py +++ b/pypy/module/exceptions/__init__.py @@ -4,7 +4,7 @@ class Module(MixedModule): applevel_name = '__exceptions__' appleveldefs = {} - + interpleveldefs = { 'ArithmeticError' : 'interp_exceptions.W_ArithmeticError', 'AssertionError' : 'interp_exceptions.W_AssertionError', @@ -39,6 +39,7 @@ 'KeyboardInterrupt' : 'interp_exceptions.W_KeyboardInterrupt', 'LookupError' : 'interp_exceptions.W_LookupError', 'MemoryError' : 'interp_exceptions.W_MemoryError', + 'ModuleNotFoundError': 'interp_exceptions.W_ModuleNotFoundError', 'NameError' : 'interp_exceptions.W_NameError', 'NotADirectoryError': 'interp_exceptions.W_NotADirectoryError', 'NotImplementedError' : 'interp_exceptions.W_NotImplementedError', diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -345,6 +345,10 @@ W_UnicodeError = _new_exception('UnicodeError', W_ValueError, """Unicode related error.""") +W_ModuleNotFoundError = _new_exception( + 'ModuleNotFoundError', W_ImportError, """Module not found.""" +) + class W_UnicodeTranslateError(W_UnicodeError): """Unicode translation error.""" diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -296,6 +296,14 @@ assert ImportError("message", path="y").path == "y" raises(TypeError, ImportError, invalid="z") + def test_modulenotfounderror(self): + assert ModuleNotFoundError("message").name is None + assert ModuleNotFoundError("message").path is None + assert ModuleNotFoundError("message", name="x").name == "x" + assert ModuleNotFoundError("message", path="y").path == "y" + raises(TypeError,
ModuleNotFoundError, invalid="z") + assert repr(ModuleNotFoundError('test')) == "ModuleNotFoundError('test',)" + def test_blockingioerror(self): args = ("a", "b", "c", "d", "e") for n in range(6): From pypy.commits at gmail.com Sat Jul 15 12:06:31 2017 From: pypy.commits at gmail.com (rmariano) Date: Sat, 15 Jul 2017 09:06:31 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Remove import on weakref._remove_dead_weakref Message-ID: <596a3d87.4c3e1c0a.eed3e.12a8@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91878:6538ff6342b3 Date: 2017-07-15 18:05 +0200 http://bitbucket.org/pypy/pypy/changeset/6538ff6342b3/ Log: Remove import on weakref._remove_dead_weakref The function is already defined in the module, so it can be imported from there directly. The import (of a function that doesn't exist), would be overridden anyway by the definition of the function that lies after. diff --git a/lib-python/3/weakref.py b/lib-python/3/weakref.py --- a/lib-python/3/weakref.py +++ b/lib-python/3/weakref.py @@ -16,8 +16,8 @@ proxy, CallableProxyType, ProxyType, - ReferenceType, - _remove_dead_weakref) + ReferenceType +) from _weakrefset import WeakSet, _IterationGuard From pypy.commits at gmail.com Sat Jul 15 13:00:37 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 15 Jul 2017 10:00:37 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: (johanfforsberg, arigo) Message-ID: <596a4a35.cc331c0a.5be61.3010@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91879:984030944ee1 Date: 2017-07-15 14:38 +0200 http://bitbucket.org/pypy/pypy/changeset/984030944ee1/ Log: (johanfforsberg, arigo) Passing test: "yield" statements inside "async" functions compile to AST already diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -1399,6 +1399,20 @@ assert isinstance(asyncwith.body[0], 
ast.Expr) assert isinstance(asyncwith.body[0].value, ast.Num) + def test_asyncYield(self): + mod = self.get_ast("async def f():\n yield 5") + assert isinstance(mod, ast.Module) + assert len(mod.body) == 1 + asyncdef = mod.body[0] + assert isinstance(asyncdef, ast.AsyncFunctionDef) + assert asyncdef.name == 'f' + assert asyncdef.args.args == None + assert len(asyncdef.body) == 1 + expr = asyncdef.body[0] + assert isinstance(expr, ast.Expr) + assert isinstance(expr.value, ast.Yield) + assert isinstance(expr.value.value, ast.Num) + def test_decode_error_in_string_literal(self): input = "u'\\x'" exc = py.test.raises(SyntaxError, self.get_ast, input).value From pypy.commits at gmail.com Sat Jul 15 13:00:40 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 15 Jul 2017 10:00:40 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: (johanfforsberg, vxgmichel, arigo) Message-ID: <596a4a38.8f871c0a.86fc9.9c19@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91880:d438c0218901 Date: 2017-07-15 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/d438c0218901/ Log: (johanfforsberg, vxgmichel, arigo) First steps towards async generators (pff, whole afternoon) diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1597,7 +1597,10 @@ def _get_code_flags(self): flags = AbstractFunctionCodeGenerator._get_code_flags(self) - return flags | consts.CO_COROUTINE + if flags & consts.CO_GENERATOR: + return (flags & ~consts.CO_GENERATOR) | consts.CO_ASYNC_GENERATOR + else: + return flags | consts.CO_COROUTINE class LambdaCodeGenerator(AbstractFunctionCodeGenerator): diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -11,6 +11,7 @@ CO_NOFREE = 0x0040 CO_COROUTINE = 0x0080 CO_ITERABLE_COROUTINE = 0x0100 # set by 
@types.coroutine +CO_ASYNC_GENERATOR = 0x0200 CO_GENERATOR_ALLOWED = 0x1000 CO_FUTURE_DIVISION = 0x2000 CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -298,20 +298,12 @@ class AsyncFunctionScope(FunctionScope): - def note_yield(self, yield_node): - raise SyntaxError("'yield' inside async function", yield_node.lineno, - yield_node.col_offset) - def note_yieldFrom(self, yield_node): raise SyntaxError("'yield from' inside async function", yield_node.lineno, yield_node.col_offset) def note_await(self, await_node): - # Compatibility with CPython 3.5: set the CO_GENERATOR flag in - # addition to the CO_COROUTINE flag if the function uses the - # "await" keyword. Don't do it if the function does not. In - # that case, CO_GENERATOR is ignored anyway. - self.is_generator = True + pass class ClassScope(Scope): diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -90,10 +90,15 @@ # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: self.frame_is_finished() - if space.is_w(w_result, space.w_None): - raise OperationError(space.w_StopIteration, space.w_None) + if isinstance(self, AsyncGenerator): + assert space.is_w(w_result, space.w_None), ( + "getting non-None here should be forbidden by the bytecode") + raise OperationError(space.w_StopAsyncIteration, space.w_None) else: - raise stopiteration_value(space, w_result) + if space.is_w(w_result, space.w_None): + raise OperationError(space.w_StopIteration, space.w_None) + else: + raise stopiteration_value(space, w_result) else: return w_result # YIELDed @@ -206,7 +211,8 @@ space = self.space if self.pycode.co_flags & (consts.CO_FUTURE_GENERATOR_STOP | consts.CO_COROUTINE | - consts.CO_ITERABLE_COROUTINE): + 
consts.CO_ITERABLE_COROUTINE | + consts.CO_ASYNC_GENERATOR): e2 = OperationError(space.w_RuntimeError, space.newtext("%s raised StopIteration" % self.KIND)) @@ -557,3 +563,49 @@ if op >= HAVE_ARGUMENT: i += 2 return count_yields >= 2 + + +# ------------------------------------------------ +# Python 3.6 async generators + + +class AsyncGenerator(GeneratorOrCoroutine): + "An async generator (i.e. a coroutine with a 'yield')" + KIND = "async_generator" + + def descr__aiter__(self): + """Return an asynchronous iterator.""" + return self + + def descr__anext__(self): + return AsyncGenASend(self) + + def descr_asend(self, w_arg): + XXX + return AsyncGenASend(w_arg) + + def descr_athrow(self, w_type, w_val=None, w_tb=None): + XXX + return AsyncGenAThrow(w_type, w_val, w_tb) + + +class AsyncGenValueWrapper(W_Root): + def __init__(self, w_value): + self.w_value = w_value + + +class AsyncGenASend(W_Root): + + def __init__(self, async_gen): + self.async_gen = async_gen + + def descr__iter__(self): + return self + + def descr__next__(self): + space = self.async_gen.space + w_value = self.async_gen.send_ex(space.w_None) + if isinstance(w_value, AsyncGenValueWrapper): + raise OperationError(space.w_StopIteration, w_value.w_value) + else: + return w_value diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,8 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_COROUTINE, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) + CO_GENERATOR, CO_COROUTINE, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY, + CO_ASYNC_GENERATOR) from pypy.tool import dis3 from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ 
b/pypy/interpreter/pyframe.py @@ -244,7 +244,8 @@ def _is_generator_or_coroutine(self): return (self.getcode().co_flags & (pycode.CO_COROUTINE | - pycode.CO_GENERATOR)) != 0 + pycode.CO_GENERATOR | + pycode.CO_ASYNC_GENERATOR)) != 0 def run(self, name=None, qualname=None): """Start this frame's execution.""" @@ -278,16 +279,24 @@ def initialize_as_generator(self, name, qualname): space = self.space - if self.getcode().co_flags & pycode.CO_COROUTINE: + flags = self.getcode().co_flags + if flags & pycode.CO_GENERATOR: + from pypy.interpreter.generator import GeneratorIterator + gen = GeneratorIterator(self, name, qualname) + ec = None + w_wrapper = None + elif flags & pycode.CO_COROUTINE: from pypy.interpreter.generator import Coroutine gen = Coroutine(self, name, qualname) ec = space.getexecutioncontext() w_wrapper = ec.w_coroutine_wrapper_fn - else: - from pypy.interpreter.generator import GeneratorIterator - gen = GeneratorIterator(self, name, qualname) + elif flags & pycode.CO_ASYNC_GENERATOR: + from pypy.interpreter.generator import AsyncGenerator + gen = AsyncGenerator(self, name, qualname) ec = None w_wrapper = None + else: + raise AssertionError("bad co_flags") if space.config.translation.rweakref: self.f_generator_wref = rweakref.ref(gen) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1088,6 +1088,11 @@ def YIELD_VALUE(self, oparg, next_instr): + if self.getcode().co_flags & pycode.CO_ASYNC_GENERATOR: + from pypy.interpreter.generator import AsyncGenValueWrapper + w_value = self.popvalue() + w_value = AsyncGenValueWrapper(w_value) + self.pushvalue(w_value) raise Yield def YIELD_FROM(self, oparg, next_instr): @@ -1096,7 +1101,6 @@ # Instead, we directly set the generator's w_yielded_from. # This asks generator.resume_execute_frame() to exhaust that # sub-iterable first before continuing on the next bytecode. 
- from pypy.interpreter.generator import Coroutine in_generator = self.get_generator() assert in_generator is not None w_inputvalue = self.popvalue() # that's always w_None, actually @@ -1595,6 +1599,7 @@ def GET_ANEXT(self, oparg, next_instr): from pypy.interpreter.generator import get_awaitable_iter + # XXX add performance shortcut if w_aiter is an AsyncGenerator space = self.space w_aiter = self.peekvalue() w_func = space.lookup(w_aiter, "__anext__") diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -203,3 +203,22 @@ pass assert result == [42] """ + + def test_async_yield(self): """ + class Done(Exception): pass + + async def mygen(): + yield 5 + + result = [] + async def foo(): + async for i in mygen(): + result.append(i) + raise Done + + try: + foo().send(None) + except Done: + pass + assert result == [5] + """ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -487,6 +487,7 @@ ClassMethod, BuiltinFunction, descr_function_get) from pypy.interpreter.pytraceback import PyTraceback from pypy.interpreter.generator import GeneratorIterator, Coroutine +from pypy.interpreter.generator import AsyncGenerator, AsyncGenASend from pypy.interpreter.generator import CoroutineWrapper, AIterWrapper from pypy.interpreter.nestedscope import Cell from pypy.interpreter.special import NotImplemented, Ellipsis @@ -852,6 +853,34 @@ ) assert not Coroutine.typedef.acceptable_as_base_class # no __new__ +AsyncGenerator.typedef = TypeDef("async_generator", + __repr__ = interp2app(AsyncGenerator.descr__repr__), + #__reduce__ = interp2app(Coroutine.descr__reduce__), + #__setstate__ = interp2app(Coroutine.descr__setstate__), + asend = interp2app(AsyncGenerator.descr_send, + descrmismatch='asend'), + athrow = interp2app(AsyncGenerator.descr_throw, + 
descrmismatch='athrow'), + aclose = interp2app(AsyncGenerator.descr_close, + descrmismatch='aclose'), + __aiter__ = interp2app(AsyncGenerator.descr__aiter__, + descrmismatch='__aiter__'), + __anext__ = interp2app(AsyncGenerator.descr__anext__, + descrmismatch='__anext__'), + ag_running = interp_attrproperty('running', cls=AsyncGenerator, wrapfn="newbool"), + ag_frame = GetSetProperty(AsyncGenerator.descr_gicr_frame), + ag_code = interp_attrproperty_w('pycode', cls=AsyncGenerator), + ag_await = interp_attrproperty_w('w_yielded_from', cls=AsyncGenerator), + __name__ = GetSetProperty(AsyncGenerator.descr__name__, + AsyncGenerator.descr_set__name__, + doc="name of the async generator"), + __qualname__ = GetSetProperty(AsyncGenerator.descr__qualname__, + AsyncGenerator.descr_set__qualname__, + doc="qualified name of the async generator"), + __weakref__ = make_weakref_descr(AsyncGenerator), +) +assert not AsyncGenerator.typedef.acceptable_as_base_class # no __new__ + CoroutineWrapper.typedef = TypeDef("coroutine_wrapper", __iter__ = interp2app(CoroutineWrapper.descr__iter__), __next__ = interp2app(CoroutineWrapper.descr__next__), @@ -868,6 +897,12 @@ ) assert not AIterWrapper.typedef.acceptable_as_base_class # no __new__ +AsyncGenASend.typedef = TypeDef("async_generator_asend", + __await__ = interp2app(AsyncGenASend.descr__iter__), + __iter__ = interp2app(AsyncGenASend.descr__iter__), + __next__ = interp2app(AsyncGenASend.descr__next__), +) + Cell.typedef = TypeDef("cell", __total_ordering__ = 'auto', __lt__ = interp2app(Cell.descr__lt__), From pypy.commits at gmail.com Sat Jul 15 13:00:43 2017 From: pypy.commits at gmail.com (arigo) Date: Sat, 15 Jul 2017 10:00:43 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: merge heads Message-ID: <596a4a3b.fa87df0a.651bd.b6ca@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91881:766b07d52393 Date: 2017-07-15 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/766b07d52393/ Log: merge heads diff --git 
a/lib-python/3/weakref.py b/lib-python/3/weakref.py --- a/lib-python/3/weakref.py +++ b/lib-python/3/weakref.py @@ -16,8 +16,8 @@ proxy, CallableProxyType, ProxyType, - ReferenceType, - _remove_dead_weakref) + ReferenceType +) from _weakrefset import WeakSet, _IterationGuard diff --git a/pypy/module/exceptions/__init__.py b/pypy/module/exceptions/__init__.py --- a/pypy/module/exceptions/__init__.py +++ b/pypy/module/exceptions/__init__.py @@ -4,7 +4,7 @@ class Module(MixedModule): applevel_name = '__exceptions__' appleveldefs = {} - + interpleveldefs = { 'ArithmeticError' : 'interp_exceptions.W_ArithmeticError', 'AssertionError' : 'interp_exceptions.W_AssertionError', @@ -39,6 +39,7 @@ 'KeyboardInterrupt' : 'interp_exceptions.W_KeyboardInterrupt', 'LookupError' : 'interp_exceptions.W_LookupError', 'MemoryError' : 'interp_exceptions.W_MemoryError', + 'ModuleNotFoundError': 'interp_exceptions.W_ModuleNotFoundError', 'NameError' : 'interp_exceptions.W_NameError', 'NotADirectoryError': 'interp_exceptions.W_NotADirectoryError', 'NotImplementedError' : 'interp_exceptions.W_NotImplementedError', diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -345,6 +345,10 @@ W_UnicodeError = _new_exception('UnicodeError', W_ValueError, """Unicode related error.""") +W_ModuleNotFoundError = _new_exception( + 'ModuleNotFoundError', W_ImportError, """Module not found.""" +) + class W_UnicodeTranslateError(W_UnicodeError): """Unicode translation error.""" diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -296,6 +296,14 @@ assert ImportError("message", path="y").path == "y" raises(TypeError, ImportError, invalid="z") + def test_modulenotfounderror(self): + assert ModuleNotFoundError("message").name 
is None + assert ModuleNotFoundError("message").path is None + assert ModuleNotFoundError("message", name="x").name == "x" + assert ModuleNotFoundError("message", path="y").path == "y" + raises(TypeError, ModuleNotFoundError, invalid="z") + assert repr(ModuleNotFoundError('test')) == "ModuleNotFoundError('test',)" + def test_blockingioerror(self): args = ("a", "b", "c", "d", "e") for n in range(6): From pypy.commits at gmail.com Sat Jul 15 13:06:09 2017 From: pypy.commits at gmail.com (rlamy) Date: Sat, 15 Jul 2017 10:06:09 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: (ronan, vaclav) Improve performance of bytearray.extend() by rewriting some of it at app-level Message-ID: <596a4b81.cd3f1c0a.a02ba.f132@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91882:6159e89116af Date: 2017-07-15 19:05 +0200 http://bitbucket.org/pypy/pypy/changeset/6159e89116af/ Log: (ronan, vaclav) Improve performance of bytearray.extend() by rewriting some of it at app-level diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -750,20 +750,12 @@ "cannot convert a (unicode) str object to bytes") # sequence of bytes - w_iter = space.iter(w_source) - length_hint = space.length_hint(w_source, 0) - builder = StringBuilder(length_hint) - while True: - try: - w_item = space.next(w_iter) - except OperationError as e: - if not e.match(space, space.w_StopIteration): - raise - break - value = space.byte_w(w_item) - builder.append(value) - return builder.build() - + w_result = space.appexec([w_source], """(seq): + result = bytearray() + for i in seq: + result.append(i) + return result""") + return w_result.getdata() W_BytesObject.typedef = TypeDef( "bytes", None, None, "read", From pypy.commits at gmail.com Sun Jul 16 05:07:58 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 02:07:58 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: (vxgmichel, arigo) Message-ID: 
<596b2cee.c7871c0a.4faf4.83c6@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91883:68e743f6f11e Date: 2017-07-16 11:07 +0200 http://bitbucket.org/pypy/pypy/changeset/68e743f6f11e/ Log: (vxgmichel, arigo) Handle exhausted async generators correctly diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -43,7 +43,7 @@ def descr__repr__(self, space): addrstring = self.getaddrstring(space) return space.newunicode(u"<%s object %s at 0x%s>" % - (unicode(self.KIND), + (self.KIND_U, self.get_qualname(), unicode(addrstring))) @@ -80,6 +80,8 @@ # execute_frame() if the frame is actually finished if isinstance(w_arg_or_err, SApplicationException): operr = w_arg_or_err.operr + elif isinstance(self, AsyncGenerator): + operr = OperationError(space.w_StopAsyncIteration, space.w_None) else: operr = OperationError(space.w_StopIteration, space.w_None) raise operr @@ -345,6 +347,7 @@ class GeneratorIterator(GeneratorOrCoroutine): "An iterator created by a generator." KIND = "generator" + KIND_U = u"generator" def descr__iter__(self): """Implement iter(self).""" @@ -393,6 +396,7 @@ class Coroutine(GeneratorOrCoroutine): "A coroutine object." KIND = "coroutine" + KIND_U = u"coroutine" def descr__await__(self, space): return CoroutineWrapper(self) @@ -571,7 +575,8 @@ class AsyncGenerator(GeneratorOrCoroutine): "An async generator (i.e. 
a coroutine with a 'yield')" - KIND = "async_generator" + KIND = "async generator" + KIND_U = u"async_generator" def descr__aiter__(self): """Return an asynchronous iterator.""" diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -222,3 +222,25 @@ pass assert result == [5] """ + + def test_async_yield_already_finished(self): """ + class Done(Exception): pass + + async def mygen(): + yield 5 + + result = [] + async def foo(): + g = mygen() + async for i in g: + result.append(i) + async for i in g: + assert False # should not be reached + raise Done + + try: + foo().send(None) + except Done: + pass + assert result == [5] + """ From pypy.commits at gmail.com Sun Jul 16 05:49:26 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 02:49:26 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: in-progress Message-ID: <596b36a6.0387df0a.329e1.fc1c@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91884:2d1ce02f13b7 Date: 2017-07-16 11:48 +0200 http://bitbucket.org/pypy/pypy/changeset/2d1ce02f13b7/ Log: in-progress diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -380,6 +380,12 @@ _break_context_cycle(space, w_value, w_context) space.setattr(w_value, space.newtext('__context__'), w_context) + def chain_exceptions_from_cause(self, space, exception): + # XXX does this code really make sense? + self.chain_exceptions(space, exception) + self.set_cause(space, exception.get_w_value(space)) + self.record_context(space, space.getexecutioncontext()) + # A simplified version of _PyErr_TrySetFromCause, which returns a # new exception of the same class, but with another error message. 
# This only works for exceptions which have just a single message, diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -129,6 +129,9 @@ try: if e.match(space, space.w_StopIteration): self._leak_stopiteration(e) + elif (isinstance(self, AsyncGenerator) and + e.match(space, space.w_StopAsyncIteration)): + self._leak_stopasynciteration(e) finally: self.frame_is_finished() raise @@ -218,14 +221,19 @@ e2 = OperationError(space.w_RuntimeError, space.newtext("%s raised StopIteration" % self.KIND)) - e2.chain_exceptions(space, e) - e2.set_cause(space, e.get_w_value(space)) - e2.record_context(space, space.getexecutioncontext()) + e2.chain_exceptions_from_cause(space, e) raise e2 else: space.warn(space.newunicode(u"generator '%s' raised StopIteration" % self.get_qualname()), - space.w_PendingDeprecationWarning) + space.w_DeprecationWarning) + + def _leak_stopasynciteration(self, e): + space = self.space + e2 = OperationError(space.w_RuntimeError, + space.newtext("async generator raised StopAsyncIteration")) + e2.chain_exceptions_from_cause(space, e) + raise e2 def descr_throw(self, w_type, w_val=None, w_tb=None): """throw(typ[,val[,tb]]) -> raise exception in generator/coroutine, @@ -469,7 +477,7 @@ def gen_close_iter(space, w_yf): # This helper function is used by close() and throw() to # close a subiterator being delegated to by yield-from. 
- if isinstance(w_yf, GeneratorIterator): + if isinstance(w_yf, GeneratorIterator) or isinstance(w_yf, Coroutine): w_yf.descr_close() else: try: From pypy.commits at gmail.com Sun Jul 16 06:13:07 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 03:13:07 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: (vxgmichel, rmariano, arigo) Message-ID: <596b3c33.118e1c0a.7d6c1.75fb@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91885:8a655968dc6f Date: 2017-07-16 12:12 +0200 http://bitbucket.org/pypy/pypy/changeset/8a655968dc6f/ Log: (vxgmichel, rmariano, arigo) Start on async generators that contain "await" diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -186,6 +186,8 @@ try: if isinstance(w_yf, GeneratorOrCoroutine): w_retval = w_yf.send_ex(w_inputvalue_or_err) + elif isinstance(w_yf, AsyncGenASend): + w_retval = w_yf.send_ex(w_inputvalue_or_err) elif space.is_w(w_inputvalue_or_err, space.w_None): w_retval = space.next(w_yf) else: @@ -617,7 +619,20 @@ def descr__next__(self): space = self.async_gen.space - w_value = self.async_gen.send_ex(space.w_None) + return self.send_ex(space.w_None) + + def descr_send(self, w_arg): + XXX + + def descr_throw(self, w_type, w_val=None, w_tb=None): + XXX + + def descr_close(self): + XXX + + def send_ex(self, w_arg_or_err): + space = self.async_gen.space + w_value = self.async_gen.send_ex(w_arg_or_err) if isinstance(w_value, AsyncGenValueWrapper): raise OperationError(space.w_StopIteration, w_value.w_value) else: diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -244,3 +244,35 @@ pass assert result == [5] """ + + def test_async_yield_with_await(self): """ + class Done(Exception): pass + + class X: + def __await__(self): + i1 = yield 40 + assert i1 == 82 + i2 = yield 41 + 
assert i2 == 93 + + async def mygen(): + yield 5 + await X() + yield 6 + + result = [] + async def foo(): + async for i in mygen(): + result.append(i) + raise Done + + co = foo() + x = co.send(None) + assert x == 40 + assert result == [5] + x = co.send(82) + assert x == 41 + assert result == [5] + raises(Done, co.send, 93) + assert result == [5, 6] + """ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -901,7 +901,18 @@ __await__ = interp2app(AsyncGenASend.descr__iter__), __iter__ = interp2app(AsyncGenASend.descr__iter__), __next__ = interp2app(AsyncGenASend.descr__next__), + close = interp2app(AsyncGenASend.descr_close), + send = interp2app(AsyncGenASend.descr_send), + throw = interp2app(AsyncGenASend.descr_throw), ) +#AsyncGenAThrow.typedef = TypeDef("async_generator_athrow", +# __await__ = interp2app(AsyncGenAThrow.descr__iter__), +# __iter__ = interp2app(AsyncGenAThrow.descr__iter__), +# __next__ = interp2app(AsyncGenAThrow.descr__next__), +# close = interp2app(AsyncGenAThrow.descr_close), +# send = interp2app(AsyncGenAThrow.descr_send), +# throw = interp2app(AsyncGenAThrow.descr_throw), +#) Cell.typedef = TypeDef("cell", __total_ordering__ = 'auto', From pypy.commits at gmail.com Sun Jul 16 07:15:26 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 04:15:26 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: (vxgmichel, arigo) Message-ID: <596b4ace.a3b0df0a.88bd.c5f8@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91886:bc59a0b5e616 Date: 2017-07-16 13:14 +0200 http://bitbucket.org/pypy/pypy/changeset/bc59a0b5e616/ Log: (vxgmichel, arigo) asend/send on async_generator objects diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -5,7 +5,7 @@ # some tests fail otherwise sys.setrecursionlimit(2000) -LOOK_FOR_PYTHON3 = 'python3.5' +LOOK_FOR_PYTHON3 = 'python3.6' PYTHON3 = os.getenv('PYTHON3') or 
py.path.local.sysfind(LOOK_FOR_PYTHON3) if PYTHON3 is not None: PYTHON3 = str(PYTHON3) diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -593,16 +593,18 @@ return self def descr__anext__(self): - return AsyncGenASend(self) + return AsyncGenASend(self, self.space.w_None) def descr_asend(self, w_arg): - XXX - return AsyncGenASend(w_arg) + return AsyncGenASend(self, w_arg) def descr_athrow(self, w_type, w_val=None, w_tb=None): XXX return AsyncGenAThrow(w_type, w_val, w_tb) + def descr_aclose(self): + XXX + class AsyncGenValueWrapper(W_Root): def __init__(self, w_value): @@ -610,9 +612,11 @@ class AsyncGenASend(W_Root): + state = 0 - def __init__(self, async_gen): + def __init__(self, async_gen, w_value_to_send): self.async_gen = async_gen + self.w_value_to_send = w_value_to_send def descr__iter__(self): return self @@ -632,8 +636,29 @@ def send_ex(self, w_arg_or_err): space = self.async_gen.space - w_value = self.async_gen.send_ex(w_arg_or_err) - if isinstance(w_value, AsyncGenValueWrapper): - raise OperationError(space.w_StopIteration, w_value.w_value) - else: - return w_value + if self.state == 2: + raise OperationError(space.w_StopIteration, space.w_None) + + # We think that the code should look like this: + #if self.w_value_to_send is not None: + # if not space.is_w(w_arg_or_err, space.w_None): + # raise ... 
+ # w_arg_or_err = self.w_value_to_send + # self.w_value_to_send = None + + # But instead, CPython's logic is this, which we think is + # giving nonsense results for 'g.asend(42).send(43)': + if self.state == 0: + if space.is_w(w_arg_or_err, space.w_None): + w_arg_or_err = self.w_value_to_send + self.state = 1 + + try: + w_value = self.async_gen.send_ex(w_arg_or_err) + if isinstance(w_value, AsyncGenValueWrapper): + raise OperationError(space.w_StopIteration, w_value.w_value) + else: + return w_value + except OperationError as e: + self.state = 2 + raise diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -276,3 +276,42 @@ raises(Done, co.send, 93) assert result == [5, 6] """ + + def test_async_yield_with_explicit_send(self): """ + class X: + def __await__(self): + i1 = yield 40 + assert i1 == 82 + i2 = yield 41 + assert i2 == 93 + + async def mygen(): + x = yield 5 + assert x == 2189 + await X() + y = yield 6 + assert y == 319 + + result = [] + async def foo(): + gen = mygen() + result.append(await gen.asend(None)) + result.append(await gen.asend(2189)) + try: + await gen.asend(319) + except StopAsyncIteration: + return 42 + else: + raise AssertionError + + co = foo() + x = co.send(None) + assert x == 40 + assert result == [5] + x = co.send(82) + assert x == 41 + assert result == [5] + e = raises(StopIteration, co.send, 93) + assert e.value.args == (42,) + assert result == [5, 6] + """ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -857,11 +857,11 @@ __repr__ = interp2app(AsyncGenerator.descr__repr__), #__reduce__ = interp2app(Coroutine.descr__reduce__), #__setstate__ = interp2app(Coroutine.descr__setstate__), - asend = interp2app(AsyncGenerator.descr_send, + asend = interp2app(AsyncGenerator.descr_asend, descrmismatch='asend'), - 
athrow = interp2app(AsyncGenerator.descr_throw, + athrow = interp2app(AsyncGenerator.descr_athrow, descrmismatch='athrow'), - aclose = interp2app(AsyncGenerator.descr_close, + aclose = interp2app(AsyncGenerator.descr_aclose, descrmismatch='aclose'), __aiter__ = interp2app(AsyncGenerator.descr__aiter__, descrmismatch='__aiter__'), From pypy.commits at gmail.com Sun Jul 16 07:28:05 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 04:28:05 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: More tests, not passing Message-ID: <596b4dc5.8292df0a.7c581.abae@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91887:4d7a6b1e53da Date: 2017-07-16 13:27 +0200 http://bitbucket.org/pypy/pypy/changeset/4d7a6b1e53da/ Log: More tests, not passing diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -315,3 +315,71 @@ assert e.value.args == (42,) assert result == [5, 6] """ + + def test_async_yield_explicit_asend_and_next(self): """ + async def mygen(y): + assert y == 4983 + x = yield 5 + assert x == 2189 + yield "ok" + + g = mygen(4983) + raises(TypeError, g.asend(42).__next__) + e = raises(StopIteration, g.asend(None).__next__) + assert e.value.args == (5,) + e = raises(StopIteration, g.asend(2189).__next__) + assert e.value.args == ("ok",) + """ + + def test_async_yield_explicit_asend_and_send(self): """ + async def mygen(y): + assert y == 4983 + x = yield 5 + assert x == 2189 + yield "ok" + + g = mygen(4983) + e = raises(TypeError, g.asend(None).send, 42) + assert str(e.value) == ("can't send non-None value to a just-started " + "async generator") + e = raises(StopIteration, g.asend(None).send, None) + assert e.value.args == (5,) + e = raises(StopIteration, g.asend("IGNORED").send, 2189) # xxx + assert e.value.args == ("ok",) + """ + + def test_async_yield_explicit_asend_used_several_times(self): """ + class X: + def 
__await__(self): + r = yield -2 + assert r == "cont1" + r = yield -3 + assert r == "cont2" + return -4 + async def mygen(y): + x = await X() + assert x == -4 + r = yield -5 + assert r == "foo" + r = yield -6 + assert r == "bar" + + g = mygen(4983) + gs = g.asend(None) + r = gs.send(None) + assert r == -2 + r = gs.send("cont1") + assert r == -3 + e = raises(StopIteration, gs.send, "cont2") + assert e.value.args == (-5,) + e = raises(StopIteration, gs.send, None) + assert e.value.args == () + e = raises(StopIteration, gs.send, None) + assert e.value.args == () + # + gs = g.asend("foo") + e = raises(StopIteration, gs.send, None) + assert e.value.args == (-6,) + e = raises(StopIteration, gs.send, "bar") + assert e.value.args == () + """ From pypy.commits at gmail.com Sun Jul 16 07:42:03 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 04:42:03 -0700 (PDT) Subject: [pypy-commit] pypy default: Add a test that is passing on pypy2.7, cpython2.7, cpython3.x, but not pypy3.5 Message-ID: <596b510b.95061c0a.70388.247d@mx.google.com> Author: Armin Rigo Branch: Changeset: r91888:0b72fd1a7641 Date: 2017-07-16 13:41 +0200 http://bitbucket.org/pypy/pypy/changeset/0b72fd1a7641/ Log: Add a test that is passing on pypy2.7, cpython2.7, cpython3.x, but not pypy3.5 diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -297,6 +297,13 @@ except TypeError: pass + def test_multiple_invalid_sends(self): + def mygen(): + yield 42 + g = mygen() + raises(TypeError, g.send, 2) + raises(TypeError, g.send, 2) + def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline From pypy.commits at gmail.com Sun Jul 16 07:44:55 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 04:44:55 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <596b51b7.12a9df0a.39c68.65cf@mx.google.com> 
Author: Armin Rigo Branch: py3.5 Changeset: r91889:b58d747dcf74 Date: 2017-07-16 13:44 +0200 http://bitbucket.org/pypy/pypy/changeset/b58d747dcf74/ Log: hg merge default diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -580,6 +580,7 @@ "getentropy() does not use a file descriptor") class URandomFDTests(unittest.TestCase): @unittest.skipUnless(resource, "test requires the resource module") + @test_support.impl_detail(pypy=False) # on Linux, may use getrandom() def test_urandom_failure(self): # Check urandom() failing when it is not able to open /dev/random. # We spawn a new process to make the test more robust (if getrlimit() diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -531,6 +531,13 @@ assert next(gen) is 1 assert next(gen) is 2 + def test_multiple_invalid_sends(self): + def mygen(): + yield 42 + g = mygen() + raises(TypeError, g.send, 2) + raises(TypeError, g.send, 2) + def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -10,7 +10,7 @@ from pypy.module.cpyext.api import ( CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, METH_STATIC, METH_VARARGS, PyObject, bootstrap_function, - build_type_checkers, cpython_api, generic_cpy_call, + build_type_checkers, cpython_api, generic_cpy_call, CANNOT_FAIL, PyTypeObjectPtr, slot_function, cts) from pypy.module.cpyext.pyobject import ( Py_DecRef, from_ref, make_ref, as_pyobj, make_typedescr) @@ -106,8 +106,14 @@ self.space, u"built-in method '%s' of '%s' object" % (self.name.decode('utf-8'), self.w_objclass.getname(self.space))) -PyCFunction_Check, 
PyCFunction_CheckExact = build_type_checkers( - "CFunction", W_PyCFunctionObject) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyCFunction_Check(space, w_obj): + from pypy.interpreter.function import BuiltinFunction + if w_obj is None: + return False + if isinstance(w_obj, W_PyCFunctionObject): + return True + return isinstance(w_obj, BuiltinFunction) class W_PyCClassMethodObject(W_PyCFunctionObject): w_self = None diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -442,15 +442,19 @@ Py_PRINT_RAW = 1 # No string quotes etc. @cpython_api([PyObject, FILEP, rffi.INT_real], rffi.INT_real, error=-1) -def PyObject_Print(space, w_obj, fp, flags): +def PyObject_Print(space, pyobj, fp, flags): """Print an object o, on file fp. Returns -1 on error. The flags argument is used to enable certain printing options. The only option currently supported is Py_PRINT_RAW; if given, the str() of the object is written instead of the repr().""" - if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: - w_str = space.str(w_obj) + if not pyobj: + w_str = space.newtext("") else: - w_str = space.repr(w_obj) + w_obj = from_ref(space, pyobj) + if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: + w_str = space.str(w_obj) + else: + w_str = space.repr(w_obj) count = space.len_w(w_str) data = space.text_w(w_str) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -455,7 +455,7 @@ ('tp_iter', '__iter__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -470,7 +470,7 @@ ('tp_as_mapping.c_mp_length', '__len__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @slot_function([PyObject], lltype.Signed, error=-1) @@ -497,7 +497,7 @@ 
('tp_as_mapping.c_mp_subscript', '__getitem__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -514,7 +514,7 @@ ('tp_as_sequence.c_sq_inplace_repeat', '__imul__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -528,7 +528,7 @@ for tp_name, attr in [('tp_as_number.c_nb_power', '__pow__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -541,10 +541,10 @@ for tp_name, attr in [('tp_as_mapping.c_mp_ass_subscript', '__setitem__'), ]: if name == tp_name: - slot_ass = w_type.getdictvalue(space, attr) + slot_ass = w_type.lookup(attr) if slot_ass is None: return - slot_del = w_type.getdictvalue(space, '__delitem__') + slot_del = w_type.lookup('__delitem__') if slot_del is None: return @@ -562,10 +562,10 @@ for tp_name, attr in [('tp_as_sequence.c_sq_ass_item', '__setitem__'), ]: if name == tp_name: - slot_ass = w_type.getdictvalue(space, attr) + slot_ass = w_type.lookup(attr) if slot_ass is None: return - slot_del = w_type.getdictvalue(space, '__delitem__') + slot_del = w_type.lookup('__delitem__') if slot_del is None: return @@ -582,8 +582,8 @@ if handled: pass elif name == 'tp_setattro': - setattr_fn = w_type.getdictvalue(space, '__setattr__') - delattr_fn = w_type.getdictvalue(space, '__delattr__') + setattr_fn = w_type.lookup('__setattr__') + delattr_fn = w_type.lookup('__delattr__') if setattr_fn is None: return @@ -598,7 +598,7 @@ return 0 slot_func = slot_tp_setattro elif name == 'tp_getattro': - getattr_fn = w_type.getdictvalue(space, '__getattribute__') + getattr_fn = w_type.lookup('__getattribute__') if getattr_fn is None: return @@ -609,7 +609,7 @@ slot_func = slot_tp_getattro elif name == 'tp_call': - call_fn = w_type.getdictvalue(space, '__call__') + call_fn = w_type.lookup('__call__') if call_fn is None: return @@ 
-622,7 +622,7 @@ slot_func = slot_tp_call elif name == 'tp_iternext': - iternext_fn = w_type.getdictvalue(space, '__next__') + iternext_fn = w_type.lookup('__next__') if iternext_fn is None: return @@ -638,7 +638,7 @@ slot_func = slot_tp_iternext elif name == 'tp_init': - init_fn = w_type.getdictvalue(space, '__init__') + init_fn = w_type.lookup('__init__') if init_fn is None: return @@ -651,7 +651,7 @@ return 0 slot_func = slot_tp_init elif name == 'tp_new': - new_fn = w_type.getdictvalue(space, '__new__') + new_fn = w_type.lookup('__new__') if new_fn is None: return @@ -663,7 +663,7 @@ return space.call_args(space.get(new_fn, w_self), args) slot_func = slot_tp_new elif name == 'tp_as_buffer.c_bf_getbuffer': - buff_fn = w_type.getdictvalue(space, '__buffer__') + buff_fn = w_type.lookup('__buffer__') if buff_fn is not None: buff_w = slot_from___buffer__(space, typedef, buff_fn) elif typedef.buffer: @@ -672,7 +672,7 @@ return slot_func = buff_w elif name == 'tp_descr_get': - get_fn = w_type.getdictvalue(space, '__get__') + get_fn = w_type.lookup('__get__') if get_fn is None: return @@ -684,8 +684,8 @@ return space.call_function(get_fn, w_self, w_obj, w_value) slot_func = slot_tp_descr_get elif name == 'tp_descr_set': - set_fn = w_type.getdictvalue(space, '__set__') - delete_fn = w_type.getdictvalue(space, '__delete__') + set_fn = w_type.lookup('__set__') + delete_fn = w_type.lookup('__delete__') if set_fn is None and delete_fn is None: return diff --git a/pypy/module/cpyext/test/test_boolobject.py b/pypy/module/cpyext/test/test_boolobject.py --- a/pypy/module/cpyext/test/test_boolobject.py +++ b/pypy/module/cpyext/test/test_boolobject.py @@ -26,3 +26,20 @@ ]) assert module.get_true() == True assert module.get_false() == False + + def test_toint(self): + module = self.import_extension('foo', [ + ("to_int", "METH_O", + ''' + if (args->ob_type->tp_as_number && args->ob_type->tp_as_number->nb_int) { + return args->ob_type->tp_as_number->nb_int(args); + } + else { + 
PyErr_SetString(PyExc_TypeError,"cannot convert bool to int"); + return NULL; + } + '''), ]) + assert module.to_int(False) == 0 + assert module.to_int(True) == 1 + + diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -77,6 +77,31 @@ assert mod.isSameFunction(mod.getarg_O) raises(SystemError, mod.isSameFunction, 1) + def test_check(self): + mod = self.import_extension('foo', [ + ('check', 'METH_O', + ''' + return PyLong_FromLong(PyCFunction_Check(args)); + '''), + ]) + from math import degrees + assert mod.check(degrees) == 1 + assert mod.check(list) == 0 + assert mod.check(sorted) == 1 + def func(): + pass + class A(object): + def meth(self): + pass + @staticmethod + def stat(): + pass + assert mod.check(func) == 0 + assert mod.check(A) == 0 + assert mod.check(A.meth) == 0 + assert mod.check(A.stat) == 0 + + class TestPyCMethodObject(BaseApiTest): def test_repr(self, space, api): """ diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -288,13 +288,20 @@ if (fp == NULL) Py_RETURN_NONE; ret = PyObject_Print(obj, fp, Py_PRINT_RAW); + if (ret < 0) { + fclose(fp); + return NULL; + } + ret = PyObject_Print(NULL, fp, Py_PRINT_RAW); + if (ret < 0) { + fclose(fp); + return NULL; + } fclose(fp); - if (ret < 0) - return NULL; Py_RETURN_TRUE; """)]) assert module.dump(self.tmpname, None) - assert open(self.tmpname).read() == 'None' + assert open(self.tmpname).read() == 'None' def test_issue1970(self): module = self.import_extension('foo', [ diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -639,7 +639,10 @@ addresses_of_static_ptrs = ( 
self.layoutbuilder.addresses_of_static_ptrs_in_nongc + self.layoutbuilder.addresses_of_static_ptrs) - log.info("found %s static roots" % (len(addresses_of_static_ptrs), )) + if len(addresses_of_static_ptrs) == 1: + log.info("found 1 static root") + else: + log.info("found %s static roots" % (len(addresses_of_static_ptrs), )) ll_static_roots_inside = lltype.malloc(lltype.Array(llmemory.Address), len(addresses_of_static_ptrs), immortal=True) From pypy.commits at gmail.com Sun Jul 16 07:48:21 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 04:48:21 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: fix for test_async_yield_explicit_asend_used_several_times Message-ID: <596b5285.a6aedf0a.75097.13f9@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91890:d3b14669d2ed Date: 2017-07-16 13:47 +0200 http://bitbucket.org/pypy/pypy/changeset/d3b14669d2ed/ Log: fix for test_async_yield_explicit_asend_used_several_times diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -626,7 +626,7 @@ return self.send_ex(space.w_None) def descr_send(self, w_arg): - XXX + return self.send_ex(w_arg) def descr_throw(self, w_type, w_val=None, w_tb=None): XXX From pypy.commits at gmail.com Sun Jul 16 08:09:28 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 05:09:28 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: rename args Message-ID: <596b5778.d097df0a.aafc4.06e7@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91891:f6faa2c5c765 Date: 2017-07-16 14:08 +0200 http://bitbucket.org/pypy/pypy/changeset/f6faa2c5c765/ Log: rename args diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -15,9 +15,9 @@ class __extend__(PyFrame): - def execute_frame(self, w_inputvalue=None, operr=None): + def execute_frame(self, 
in_generator=None, w_arg_or_err=None): # indirection for the optional arguments - return my_execute_frame(self, w_inputvalue, operr) + return my_execute_frame(self, in_generator, w_arg_or_err) def _safe(s): From pypy.commits at gmail.com Sun Jul 16 08:18:50 2017 From: pypy.commits at gmail.com (rmariano) Date: Sun, 16 Jul 2017 05:18:50 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: (arigo, rmariano) Message-ID: <596b59aa.a1abdf0a.72ac4.3159@mx.google.com> Author: Mariano Anaya Branch: py3.5 Changeset: r91892:0e1f2cd91ced Date: 2017-07-16 14:16 +0200 http://bitbucket.org/pypy/pypy/changeset/0e1f2cd91ced/ Log: (arigo, rmariano) Fixing regression with non-started generator receiving non-None, should always raise TypeError. diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -109,11 +109,16 @@ # # Optimization only: after we've started a Coroutine without # CO_YIELD_INSIDE_TRY, then Coroutine._finalize_() will be a no-op - if (isinstance(self, Coroutine) - and frame.last_instr == -1 - and not (self.pycode.co_flags & CO_YIELD_INSIDE_TRY)): - rgc.may_ignore_finalizer(self) - # + if frame.last_instr == -1: + if (isinstance(self, Coroutine) and + not (self.pycode.co_flags & CO_YIELD_INSIDE_TRY)): + rgc.may_ignore_finalizer(self) + + if (not space.is_w(w_arg_or_err, space.w_None) and + not isinstance(w_arg_or_err, SApplicationException)): + raise oefmt(space.w_TypeError, + "can't send non-None value to a just-started %s", + self.KIND) self.running = True try: w_result = frame.execute_frame(self, w_arg_or_err) @@ -158,12 +163,7 @@ return frame.handle_generator_error(w_arg_or_err.operr) last_instr = jit.promote(frame.last_instr) - if last_instr == -1: - if not space.is_w(w_arg_or_err, space.w_None): - raise oefmt(space.w_TypeError, - "can't send non-None value to a just-started %s", - self.KIND) - else: + if last_instr != -1: frame.pushvalue(w_arg_or_err) return r_uint(last_instr 
+ 1) From pypy.commits at gmail.com Sun Jul 16 08:18:51 2017 From: pypy.commits at gmail.com (rmariano) Date: Sun, 16 Jul 2017 05:18:51 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Merge heads Message-ID: <596b59ab.10a1df0a.8e45c.592a@mx.google.com> Author: Mariano Anaya Branch: py3.5 Changeset: r91893:6de61c965087 Date: 2017-07-16 14:18 +0200 http://bitbucket.org/pypy/pypy/changeset/6de61c965087/ Log: Merge heads diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -15,9 +15,9 @@ class __extend__(PyFrame): - def execute_frame(self, w_inputvalue=None, operr=None): + def execute_frame(self, in_generator=None, w_arg_or_err=None): # indirection for the optional arguments - return my_execute_frame(self, w_inputvalue, operr) + return my_execute_frame(self, in_generator, w_arg_or_err) def _safe(s): From pypy.commits at gmail.com Sun Jul 16 08:21:13 2017 From: pypy.commits at gmail.com (rmariano) Date: Sun, 16 Jul 2017 05:21:13 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Merge branch py3.5 Message-ID: <596b5a39.c7331c0a.5372f.5947@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91894:51ae4831bf9b Date: 2017-07-16 14:20 +0200 http://bitbucket.org/pypy/pypy/changeset/51ae4831bf9b/ Log: Merge branch py3.5 diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -580,6 +580,7 @@ "getentropy() does not use a file descriptor") class URandomFDTests(unittest.TestCase): @unittest.skipUnless(resource, "test requires the resource module") + @test_support.impl_detail(pypy=False) # on Linux, may use getrandom() def test_urandom_failure(self): # Check urandom() failing when it is not able to open /dev/random. 
# We spawn a new process to make the test more robust (if getrlimit() diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -116,11 +116,16 @@ # # Optimization only: after we've started a Coroutine without # CO_YIELD_INSIDE_TRY, then Coroutine._finalize_() will be a no-op - if (isinstance(self, Coroutine) - and frame.last_instr == -1 - and not (self.pycode.co_flags & CO_YIELD_INSIDE_TRY)): - rgc.may_ignore_finalizer(self) - # + if frame.last_instr == -1: + if (isinstance(self, Coroutine) and + not (self.pycode.co_flags & CO_YIELD_INSIDE_TRY)): + rgc.may_ignore_finalizer(self) + + if (not space.is_w(w_arg_or_err, space.w_None) and + not isinstance(w_arg_or_err, SApplicationException)): + raise oefmt(space.w_TypeError, + "can't send non-None value to a just-started %s", + self.KIND) self.running = True try: w_result = frame.execute_frame(self, w_arg_or_err) @@ -168,12 +173,7 @@ return frame.handle_generator_error(w_arg_or_err.operr) last_instr = jit.promote(frame.last_instr) - if last_instr == -1: - if not space.is_w(w_arg_or_err, space.w_None): - raise oefmt(space.w_TypeError, - "can't send non-None value to a just-started %s", - self.KIND) - else: + if last_instr != -1: frame.pushvalue(w_arg_or_err) return r_uint(last_instr + 1) diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -531,6 +531,13 @@ assert next(gen) is 1 assert next(gen) is 2 + def test_multiple_invalid_sends(self): + def mygen(): + yield 42 + g = mygen() + raises(TypeError, g.send, 2) + raises(TypeError, g.send, 2) + def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ 
b/pypy/module/_vmprof/interp_vmprof.py @@ -15,9 +15,9 @@ class __extend__(PyFrame): - def execute_frame(self, w_inputvalue=None, operr=None): + def execute_frame(self, in_generator=None, w_arg_or_err=None): # indirection for the optional arguments - return my_execute_frame(self, w_inputvalue, operr) + return my_execute_frame(self, in_generator, w_arg_or_err) def _safe(s): diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -10,7 +10,7 @@ from pypy.module.cpyext.api import ( CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, METH_STATIC, METH_VARARGS, PyObject, bootstrap_function, - build_type_checkers, cpython_api, generic_cpy_call, + build_type_checkers, cpython_api, generic_cpy_call, CANNOT_FAIL, PyTypeObjectPtr, slot_function, cts) from pypy.module.cpyext.pyobject import ( Py_DecRef, from_ref, make_ref, as_pyobj, make_typedescr) @@ -106,8 +106,14 @@ self.space, u"built-in method '%s' of '%s' object" % (self.name.decode('utf-8'), self.w_objclass.getname(self.space))) -PyCFunction_Check, PyCFunction_CheckExact = build_type_checkers( - "CFunction", W_PyCFunctionObject) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyCFunction_Check(space, w_obj): + from pypy.interpreter.function import BuiltinFunction + if w_obj is None: + return False + if isinstance(w_obj, W_PyCFunctionObject): + return True + return isinstance(w_obj, BuiltinFunction) class W_PyCClassMethodObject(W_PyCFunctionObject): w_self = None diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -442,15 +442,19 @@ Py_PRINT_RAW = 1 # No string quotes etc. @cpython_api([PyObject, FILEP, rffi.INT_real], rffi.INT_real, error=-1) -def PyObject_Print(space, w_obj, fp, flags): +def PyObject_Print(space, pyobj, fp, flags): """Print an object o, on file fp. 
Returns -1 on error. The flags argument is used to enable certain printing options. The only option currently supported is Py_PRINT_RAW; if given, the str() of the object is written instead of the repr().""" - if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: - w_str = space.str(w_obj) + if not pyobj: + w_str = space.newtext("") else: - w_str = space.repr(w_obj) + w_obj = from_ref(space, pyobj) + if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW: + w_str = space.str(w_obj) + else: + w_str = space.repr(w_obj) count = space.len_w(w_str) data = space.text_w(w_str) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -455,7 +455,7 @@ ('tp_iter', '__iter__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -470,7 +470,7 @@ ('tp_as_mapping.c_mp_length', '__len__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @slot_function([PyObject], lltype.Signed, error=-1) @@ -497,7 +497,7 @@ ('tp_as_mapping.c_mp_subscript', '__getitem__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -514,7 +514,7 @@ ('tp_as_sequence.c_sq_inplace_repeat', '__imul__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -528,7 +528,7 @@ for tp_name, attr in [('tp_as_number.c_nb_power', '__pow__'), ]: if name == tp_name: - slot_fn = w_type.getdictvalue(space, attr) + slot_fn = w_type.lookup(attr) if slot_fn is None: return @@ -541,10 +541,10 @@ for tp_name, attr in [('tp_as_mapping.c_mp_ass_subscript', '__setitem__'), ]: if name == tp_name: - slot_ass = w_type.getdictvalue(space, attr) + slot_ass = w_type.lookup(attr) if slot_ass is None: return - slot_del = 
w_type.getdictvalue(space, '__delitem__') + slot_del = w_type.lookup('__delitem__') if slot_del is None: return @@ -562,10 +562,10 @@ for tp_name, attr in [('tp_as_sequence.c_sq_ass_item', '__setitem__'), ]: if name == tp_name: - slot_ass = w_type.getdictvalue(space, attr) + slot_ass = w_type.lookup(attr) if slot_ass is None: return - slot_del = w_type.getdictvalue(space, '__delitem__') + slot_del = w_type.lookup('__delitem__') if slot_del is None: return @@ -582,8 +582,8 @@ if handled: pass elif name == 'tp_setattro': - setattr_fn = w_type.getdictvalue(space, '__setattr__') - delattr_fn = w_type.getdictvalue(space, '__delattr__') + setattr_fn = w_type.lookup('__setattr__') + delattr_fn = w_type.lookup('__delattr__') if setattr_fn is None: return @@ -598,7 +598,7 @@ return 0 slot_func = slot_tp_setattro elif name == 'tp_getattro': - getattr_fn = w_type.getdictvalue(space, '__getattribute__') + getattr_fn = w_type.lookup('__getattribute__') if getattr_fn is None: return @@ -609,7 +609,7 @@ slot_func = slot_tp_getattro elif name == 'tp_call': - call_fn = w_type.getdictvalue(space, '__call__') + call_fn = w_type.lookup('__call__') if call_fn is None: return @@ -622,7 +622,7 @@ slot_func = slot_tp_call elif name == 'tp_iternext': - iternext_fn = w_type.getdictvalue(space, '__next__') + iternext_fn = w_type.lookup('__next__') if iternext_fn is None: return @@ -638,7 +638,7 @@ slot_func = slot_tp_iternext elif name == 'tp_init': - init_fn = w_type.getdictvalue(space, '__init__') + init_fn = w_type.lookup('__init__') if init_fn is None: return @@ -651,7 +651,7 @@ return 0 slot_func = slot_tp_init elif name == 'tp_new': - new_fn = w_type.getdictvalue(space, '__new__') + new_fn = w_type.lookup('__new__') if new_fn is None: return @@ -663,7 +663,7 @@ return space.call_args(space.get(new_fn, w_self), args) slot_func = slot_tp_new elif name == 'tp_as_buffer.c_bf_getbuffer': - buff_fn = w_type.getdictvalue(space, '__buffer__') + buff_fn = w_type.lookup('__buffer__') if buff_fn 
is not None: buff_w = slot_from___buffer__(space, typedef, buff_fn) elif typedef.buffer: @@ -672,7 +672,7 @@ return slot_func = buff_w elif name == 'tp_descr_get': - get_fn = w_type.getdictvalue(space, '__get__') + get_fn = w_type.lookup('__get__') if get_fn is None: return @@ -684,8 +684,8 @@ return space.call_function(get_fn, w_self, w_obj, w_value) slot_func = slot_tp_descr_get elif name == 'tp_descr_set': - set_fn = w_type.getdictvalue(space, '__set__') - delete_fn = w_type.getdictvalue(space, '__delete__') + set_fn = w_type.lookup('__set__') + delete_fn = w_type.lookup('__delete__') if set_fn is None and delete_fn is None: return diff --git a/pypy/module/cpyext/test/test_boolobject.py b/pypy/module/cpyext/test/test_boolobject.py --- a/pypy/module/cpyext/test/test_boolobject.py +++ b/pypy/module/cpyext/test/test_boolobject.py @@ -26,3 +26,20 @@ ]) assert module.get_true() == True assert module.get_false() == False + + def test_toint(self): + module = self.import_extension('foo', [ + ("to_int", "METH_O", + ''' + if (args->ob_type->tp_as_number && args->ob_type->tp_as_number->nb_int) { + return args->ob_type->tp_as_number->nb_int(args); + } + else { + PyErr_SetString(PyExc_TypeError,"cannot convert bool to int"); + return NULL; + } + '''), ]) + assert module.to_int(False) == 0 + assert module.to_int(True) == 1 + + diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -77,6 +77,31 @@ assert mod.isSameFunction(mod.getarg_O) raises(SystemError, mod.isSameFunction, 1) + def test_check(self): + mod = self.import_extension('foo', [ + ('check', 'METH_O', + ''' + return PyLong_FromLong(PyCFunction_Check(args)); + '''), + ]) + from math import degrees + assert mod.check(degrees) == 1 + assert mod.check(list) == 0 + assert mod.check(sorted) == 1 + def func(): + pass + class A(object): + def meth(self): + pass + 
@staticmethod + def stat(): + pass + assert mod.check(func) == 0 + assert mod.check(A) == 0 + assert mod.check(A.meth) == 0 + assert mod.check(A.stat) == 0 + + class TestPyCMethodObject(BaseApiTest): def test_repr(self, space, api): """ diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -288,13 +288,20 @@ if (fp == NULL) Py_RETURN_NONE; ret = PyObject_Print(obj, fp, Py_PRINT_RAW); + if (ret < 0) { + fclose(fp); + return NULL; + } + ret = PyObject_Print(NULL, fp, Py_PRINT_RAW); + if (ret < 0) { + fclose(fp); + return NULL; + } fclose(fp); - if (ret < 0) - return NULL; Py_RETURN_TRUE; """)]) assert module.dump(self.tmpname, None) - assert open(self.tmpname).read() == 'None' + assert open(self.tmpname).read() == 'None' def test_issue1970(self): module = self.import_extension('foo', [ diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -750,20 +750,12 @@ "cannot convert a (unicode) str object to bytes") # sequence of bytes - w_iter = space.iter(w_source) - length_hint = space.length_hint(w_source, 0) - builder = StringBuilder(length_hint) - while True: - try: - w_item = space.next(w_iter) - except OperationError as e: - if not e.match(space, space.w_StopIteration): - raise - break - value = space.byte_w(w_item) - builder.append(value) - return builder.build() - + w_result = space.appexec([w_source], """(seq): + result = bytearray() + for i in seq: + result.append(i) + return result""") + return w_result.getdata() W_BytesObject.typedef = TypeDef( "bytes", None, None, "read", diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -639,7 +639,10 @@ addresses_of_static_ptrs = ( 
self.layoutbuilder.addresses_of_static_ptrs_in_nongc + self.layoutbuilder.addresses_of_static_ptrs) - log.info("found %s static roots" % (len(addresses_of_static_ptrs), )) + if len(addresses_of_static_ptrs) == 1: + log.info("found 1 static root") + else: + log.info("found %s static roots" % (len(addresses_of_static_ptrs), )) ll_static_roots_inside = lltype.malloc(lltype.Array(llmemory.Address), len(addresses_of_static_ptrs), immortal=True) From pypy.commits at gmail.com Sun Jul 16 08:24:01 2017 From: pypy.commits at gmail.com (rlamy) Date: Sun, 16 Jul 2017 05:24:01 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Merged in mad-marty/pypy/py3.6 (pull request #556) Message-ID: <596b5ae1.c68b1c0a.71522.3b1f@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r91896:f593f803fcdb Date: 2017-07-16 12:23 +0000 http://bitbucket.org/pypy/pypy/changeset/f593f803fcdb/ Log: Merged in mad-marty/pypy/py3.6 (pull request #556) (ronan,pzieschang) fixed int() behaviour to mirror python 3.6, where a real int must be returned diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -277,6 +277,7 @@ "expected %s, got %T object", expected, self) def int(self, space): + from pypy.objspace.std.intobject import _new_int w_impl = space.lookup(self, '__int__') if w_impl is None: self._typed_unwrap_error(space, "integer") @@ -291,6 +292,8 @@ "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of " "Python." 
% (tp,)), space.w_DeprecationWarning) + # convert to int to be like python 3.6 + w_result = _new_int(space, space.w_int, w_result) return w_result raise oefmt(space.w_TypeError, "__int__ returned non-int (type '%T')", w_result) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -315,7 +315,8 @@ return IntSubclass(42) n = int(ReturnsIntSubclass()) assert n == 42 - assert type(n) is IntSubclass + # cpython 3.6 fixed behaviour to actually return type int here + assert type(n) is int def test_trunc_returns(self): # but!: (blame CPython 2.7) From pypy.commits at gmail.com Sun Jul 16 08:23:59 2017 From: pypy.commits at gmail.com (p_zi...@yahoo.de) Date: Sun, 16 Jul 2017 05:23:59 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: (ronan, pzieschang) fixed int() behaviour to mirror python 3.6, where a real int must be returned Message-ID: <596b5adf.02d91c0a.87261.27d9@mx.google.com> Author: p_zieschang at yahoo.de Branch: py3.6 Changeset: r91895:caa89dbcd27a Date: 2017-07-16 14:20 +0200 http://bitbucket.org/pypy/pypy/changeset/caa89dbcd27a/ Log: (ronan,pzieschang) fixed int() behaviour to mirror python 3.6, where a real int must be returned diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -277,6 +277,7 @@ "expected %s, got %T object", expected, self) def int(self, space): + from pypy.objspace.std.intobject import _new_int w_impl = space.lookup(self, '__int__') if w_impl is None: self._typed_unwrap_error(space, "integer") @@ -291,6 +292,8 @@ "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of " "Python." 
% (tp,)), space.w_DeprecationWarning) + # convert to int to be like python 3.6 + w_result = _new_int(space, space.w_int, w_result) return w_result raise oefmt(space.w_TypeError, "__int__ returned non-int (type '%T')", w_result) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -315,7 +315,8 @@ return IntSubclass(42) n = int(ReturnsIntSubclass()) assert n == 42 - assert type(n) is IntSubclass + # cpython 3.6 fixed behaviour to actually return type int here + assert type(n) is int def test_trunc_returns(self): # but!: (blame CPython 2.7) From pypy.commits at gmail.com Sun Jul 16 08:51:03 2017 From: pypy.commits at gmail.com (rmariano) Date: Sun, 16 Jul 2017 05:51:03 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Merge heads Message-ID: <596b6137.55201c0a.7b8bb.6a0b@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91898:1739c19d3d16 Date: 2017-07-16 14:50 +0200 http://bitbucket.org/pypy/pypy/changeset/1739c19d3d16/ Log: Merge heads diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -277,6 +277,7 @@ "expected %s, got %T object", expected, self) def int(self, space): + from pypy.objspace.std.intobject import _new_int w_impl = space.lookup(self, '__int__') if w_impl is None: self._typed_unwrap_error(space, "integer") @@ -291,6 +292,8 @@ "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of " "Python." 
% (tp,)), space.w_DeprecationWarning) + # convert to int to be like python 3.6 + w_result = _new_int(space, space.w_int, w_result) return w_result raise oefmt(space.w_TypeError, "__int__ returned non-int (type '%T')", w_result) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -315,7 +315,8 @@ return IntSubclass(42) n = int(ReturnsIntSubclass()) assert n == 42 - assert type(n) is IntSubclass + # cpython 3.6 fixed behaviour to actually return type int here + assert type(n) is int def test_trunc_returns(self): # but!: (blame CPython 2.7) From pypy.commits at gmail.com Sun Jul 16 08:51:01 2017 From: pypy.commits at gmail.com (rmariano) Date: Sun, 16 Jul 2017 05:51:01 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Add test for async generators with asend and throw, and vice-versa Message-ID: <596b6135.07bf1c0a.5d00.10be@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91897:6f9eb0f25f77 Date: 2017-07-16 14:49 +0200 http://bitbucket.org/pypy/pypy/changeset/6f9eb0f25f77/ Log: Add test for async generators with asend and throw, and vice-versa diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -383,3 +383,33 @@ e = raises(StopIteration, gs.send, "bar") assert e.value.args == () """ + + def test_async_yield_asend_notnone_throw(self): """ + async def f(): + yield 123 + + raises(ValueError, f().asend(42).throw, ValueError) + """ + + def test_async_yield_asend_none_throw(self): """ + async def f(): + yield 123 + + raises(ValueError, f().asend(None).throw, ValueError) + """ + + def test_async_yield_athrow_send_none(self): """ + async def ag(): + yield 42 + + raises(ValueError, ag().athrow(ValueError).send, None) + """ + + def test_async_yield_athrow_send_notnone(self): """ + async 
def ag(): + yield 42 + + ex = raises(RuntimeError, ag().athrow(ValueError).send, 42) + expected = ("can't send non-None value to a just-started coroutine", ) + assert ex.value.args == expected + """ From pypy.commits at gmail.com Sun Jul 16 08:52:33 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 05:52:33 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: AsyncGenASend().descr_throw() Message-ID: <596b6191.b088df0a.1fa9e.2b15@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91899:75e725412a83 Date: 2017-07-16 14:51 +0200 http://bitbucket.org/pypy/pypy/changeset/75e725412a83/ Log: AsyncGenASend().descr_throw() diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -615,6 +615,7 @@ state = 0 def __init__(self, async_gen, w_value_to_send): + self.space = async_gen.space self.async_gen = async_gen self.w_value_to_send = w_value_to_send @@ -622,20 +623,27 @@ return self def descr__next__(self): - space = self.async_gen.space - return self.send_ex(space.w_None) + return self.send_ex(self.space.w_None) def descr_send(self, w_arg): return self.send_ex(w_arg) def descr_throw(self, w_type, w_val=None, w_tb=None): - XXX + space = self.space + if self.state == 2: + raise OperationError(space.w_StopIteration, space.w_None) + try: + w_value = self.async_gen.throw(w_type, w_val, w_tb) + return self.unwrap_value(w_value) + except OperationError as e: + self.state = 2 + raise def descr_close(self): XXX def send_ex(self, w_arg_or_err): - space = self.async_gen.space + space = self.space if self.state == 2: raise OperationError(space.w_StopIteration, space.w_None) @@ -655,10 +663,13 @@ try: w_value = self.async_gen.send_ex(w_arg_or_err) - if isinstance(w_value, AsyncGenValueWrapper): - raise OperationError(space.w_StopIteration, w_value.w_value) - else: - return w_value + return self.unwrap_value(w_value) except OperationError as e: self.state = 2 raise + + def 
unwrap_value(self, w_value): + if isinstance(w_value, AsyncGenValueWrapper): + raise OperationError(self.space.w_StopIteration, w_value.w_value) + else: + return w_value From pypy.commits at gmail.com Sun Jul 16 09:03:50 2017 From: pypy.commits at gmail.com (p_zi...@yahoo.de) Date: Sun, 16 Jul 2017 06:03:50 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: (ronan, pzieschang) fixed int() behaviour also for __trunc__ returning a subclass of int Message-ID: <596b6436.c33e1c0a.980a4.80a7@mx.google.com> Author: p_zieschang at yahoo.de Branch: py3.6 Changeset: r91900:3ed5101a25c4 Date: 2017-07-16 14:46 +0200 http://bitbucket.org/pypy/pypy/changeset/3ed5101a25c4/ Log: (ronan,pzieschang) fixed int() behaviour also for __trunc__ returning a subclass of int diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -865,7 +865,7 @@ return _from_intlike(space, w_inttype, space.int(w_value)) elif space.lookup(w_value, '__trunc__') is not None: w_obj = space.trunc(w_value) - if not space.isinstance_w(w_obj, space.w_int): + if not space.is_w(space.type(w_obj), space.w_int): w_obj = space.int(w_obj) return _from_intlike(space, w_inttype, w_obj) elif space.isinstance_w(w_value, space.w_unicode): diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -337,7 +337,7 @@ return IntSubclass(42) n = int(TruncReturnsNonInt()) assert n == 42 - assert type(n) is IntSubclass + assert type(n) is int def test_long_before_string(self): class A(str): From pypy.commits at gmail.com Sun Jul 16 09:03:52 2017 From: pypy.commits at gmail.com (rlamy) Date: Sun, 16 Jul 2017 06:03:52 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Merged in mad-marty/pypy/py3.6 (pull request #557) Message-ID: <596b6438.9aa0df0a.c208b.f8be@mx.google.com> Author: Ronan Lamy Branch: py3.6 
Changeset: r91901:b69f41d7b76d Date: 2017-07-16 13:03 +0000 http://bitbucket.org/pypy/pypy/changeset/b69f41d7b76d/ Log: Merged in mad-marty/pypy/py3.6 (pull request #557) (ronan,pzieschang) fixed int() behaviour also for __trunc__ returning a subclass of int diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -865,7 +865,7 @@ return _from_intlike(space, w_inttype, space.int(w_value)) elif space.lookup(w_value, '__trunc__') is not None: w_obj = space.trunc(w_value) - if not space.isinstance_w(w_obj, space.w_int): + if not space.is_w(space.type(w_obj), space.w_int): w_obj = space.int(w_obj) return _from_intlike(space, w_inttype, w_obj) elif space.isinstance_w(w_value, space.w_unicode): diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -337,7 +337,7 @@ return IntSubclass(42) n = int(TruncReturnsNonInt()) assert n == 42 - assert type(n) is IntSubclass + assert type(n) is int def test_long_before_string(self): class A(str): From pypy.commits at gmail.com Sun Jul 16 09:20:05 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 06:20:05 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: support for athrow() Message-ID: <596b6805.ce8c1c0a.e022d.ab9f@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91902:32f19625dc25 Date: 2017-07-16 15:19 +0200 http://bitbucket.org/pypy/pypy/changeset/32f19625dc25/ Log: support for athrow() diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -186,8 +186,8 @@ try: if isinstance(w_yf, GeneratorOrCoroutine): w_retval = w_yf.send_ex(w_inputvalue_or_err) - elif isinstance(w_yf, AsyncGenASend): - w_retval = w_yf.send_ex(w_inputvalue_or_err) + elif isinstance(w_yf, AsyncGenASend): # 
performance only + w_retval = w_yf.do_send(w_inputvalue_or_err) elif space.is_w(w_inputvalue_or_err, space.w_None): w_retval = space.next(w_yf) else: @@ -599,8 +599,7 @@ return AsyncGenASend(self, w_arg) def descr_athrow(self, w_type, w_val=None, w_tb=None): - XXX - return AsyncGenAThrow(w_type, w_val, w_tb) + return AsyncGenAThrow(self, w_type, w_val, w_tb) def descr_aclose(self): XXX @@ -611,22 +610,21 @@ self.w_value = w_value -class AsyncGenASend(W_Root): +class AsyncGenABase(W_Root): state = 0 - def __init__(self, async_gen, w_value_to_send): + def __init__(self, async_gen): self.space = async_gen.space self.async_gen = async_gen - self.w_value_to_send = w_value_to_send def descr__iter__(self): return self def descr__next__(self): - return self.send_ex(self.space.w_None) + return self.do_send(self.space.w_None) def descr_send(self, w_arg): - return self.send_ex(w_arg) + return self.do_send(w_arg) def descr_throw(self, w_type, w_val=None, w_tb=None): space = self.space @@ -642,7 +640,20 @@ def descr_close(self): XXX - def send_ex(self, w_arg_or_err): + def unwrap_value(self, w_value): + if isinstance(w_value, AsyncGenValueWrapper): + raise OperationError(self.space.w_StopIteration, w_value.w_value) + else: + return w_value + + +class AsyncGenASend(AsyncGenABase): + + def __init__(self, async_gen, w_value_to_send): + AsyncGenABase.__init__(self, async_gen) + self.w_value_to_send = w_value_to_send + + def do_send(self, w_arg_or_err): space = self.space if self.state == 2: raise OperationError(space.w_StopIteration, space.w_None) @@ -668,8 +679,29 @@ self.state = 2 raise - def unwrap_value(self, w_value): - if isinstance(w_value, AsyncGenValueWrapper): - raise OperationError(self.space.w_StopIteration, w_value.w_value) + +class AsyncGenAThrow(AsyncGenABase): + + def __init__(self, async_gen, w_exc_type, w_exc_value, w_exc_tb): + AsyncGenABase.__init__(self, async_gen) + self.w_exc_type = w_exc_type + self.w_exc_value = w_exc_value + self.w_exc_tb = w_exc_tb + + 
def do_send(self, w_arg_or_err): + # XXX FAR MORE COMPLICATED IN CPYTHON + space = self.space + if self.state == 2: + raise OperationError(space.w_StopIteration, space.w_None) + + if self.state == 0: + if not space.is_w(w_arg_or_err, space.w_None): + raise OperationError(space.w_RuntimeError, space.newtext( + "can't send non-None value to a just-started coroutine")) + self.state = 1 + w_value = self.async_gen.throw(self.w_exc_type, + self.w_exc_value, + self.w_exc_tb) else: - return w_value + w_value = self.async_gen.send_ex(w_arg_or_err) + return self.unwrap_value(w_value) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -487,8 +487,9 @@ ClassMethod, BuiltinFunction, descr_function_get) from pypy.interpreter.pytraceback import PyTraceback from pypy.interpreter.generator import GeneratorIterator, Coroutine -from pypy.interpreter.generator import AsyncGenerator, AsyncGenASend from pypy.interpreter.generator import CoroutineWrapper, AIterWrapper +from pypy.interpreter.generator import AsyncGenerator +from pypy.interpreter.generator import AsyncGenASend, AsyncGenAThrow from pypy.interpreter.nestedscope import Cell from pypy.interpreter.special import NotImplemented, Ellipsis @@ -905,14 +906,14 @@ send = interp2app(AsyncGenASend.descr_send), throw = interp2app(AsyncGenASend.descr_throw), ) -#AsyncGenAThrow.typedef = TypeDef("async_generator_athrow", -# __await__ = interp2app(AsyncGenAThrow.descr__iter__), -# __iter__ = interp2app(AsyncGenAThrow.descr__iter__), -# __next__ = interp2app(AsyncGenAThrow.descr__next__), -# close = interp2app(AsyncGenAThrow.descr_close), -# send = interp2app(AsyncGenAThrow.descr_send), -# throw = interp2app(AsyncGenAThrow.descr_throw), -#) +AsyncGenAThrow.typedef = TypeDef("async_generator_athrow", + __await__ = interp2app(AsyncGenAThrow.descr__iter__), + __iter__ = interp2app(AsyncGenAThrow.descr__iter__), + __next__ = 
interp2app(AsyncGenAThrow.descr__next__), + close = interp2app(AsyncGenAThrow.descr_close), + send = interp2app(AsyncGenAThrow.descr_send), + throw = interp2app(AsyncGenAThrow.descr_throw), +) Cell.typedef = TypeDef("cell", __total_ordering__ = 'auto', From pypy.commits at gmail.com Sun Jul 16 09:31:03 2017 From: pypy.commits at gmail.com (fijal) Date: Sun, 16 Jul 2017 06:31:03 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add my talk draft Message-ID: <596b6a97.495d1c0a.60311.21ec@mx.google.com> Author: fijal Branch: extradoc Changeset: r5821:a123560be7b9 Date: 2017-07-16 15:30 +0200 http://bitbucket.org/pypy/extradoc/changeset/a123560be7b9/ Log: add my talk draft diff --git a/talk/pyconru-2017/talk.key b/talk/pyconru-2017/talk.key new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..ce8110f8dee6bb6dfffe662711c6ad55d6f5b248 GIT binary patch [cut] From pypy.commits at gmail.com Sun Jul 16 09:58:31 2017 From: pypy.commits at gmail.com (p_zi...@yahoo.de) Date: Sun, 16 Jul 2017 06:58:31 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: (ronan, pzieschang) fixed test_intobject behaviour for python 3.6 as well with bool Message-ID: <596b7107.55201c0a.7b8bb.7e08@mx.google.com> Author: p_zieschang at yahoo.de Branch: py3.6 Changeset: r91903:0c4aaad09618 Date: 2017-07-16 15:36 +0200 http://bitbucket.org/pypy/pypy/changeset/0c4aaad09618/ Log: (ronan,pzieschang) fixed test_intobject behaviour for python 3.6 as well with bool diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -405,10 +405,10 @@ value = 4200000000000000000000000000000000 assert int(j()) == 4200000000000000000000000000000000 value = subint(42) - assert int(j()) == 42 and type(int(j())) is subint + assert int(j()) == 42 and type(int(j())) is int value = subint(4200000000000000000000000000000000) assert (int(j()) == 4200000000000000000000000000000000 - 
and type(int(j())) is subint) + and type(int(j())) is int) value = 42.0 raises(TypeError, int, j()) value = "foo" @@ -473,7 +473,7 @@ return True n = int(TruncReturnsNonInt()) assert n == 1 - assert type(n) is bool + assert type(n) is int def test_int_before_string(self): class Integral(str): @@ -639,7 +639,7 @@ warnings.simplefilter("always", DeprecationWarning) n = int(bad) m = _operator.index(bad) - assert n is True + assert n == 1 and type(n) is int assert m is False assert len(log) == 2 From pypy.commits at gmail.com Sun Jul 16 09:58:33 2017 From: pypy.commits at gmail.com (rlamy) Date: Sun, 16 Jul 2017 06:58:33 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Merged in mad-marty/pypy/py3.6 (pull request #558) Message-ID: <596b7109.0c911c0a.1a61e.706c@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r91904:6e9e65d8b76a Date: 2017-07-16 13:58 +0000 http://bitbucket.org/pypy/pypy/changeset/6e9e65d8b76a/ Log: Merged in mad-marty/pypy/py3.6 (pull request #558) (ronan,pzieschang) fixed test_intobject behaviour for python 3.6 as well with bool diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -405,10 +405,10 @@ value = 4200000000000000000000000000000000 assert int(j()) == 4200000000000000000000000000000000 value = subint(42) - assert int(j()) == 42 and type(int(j())) is subint + assert int(j()) == 42 and type(int(j())) is int value = subint(4200000000000000000000000000000000) assert (int(j()) == 4200000000000000000000000000000000 - and type(int(j())) is subint) + and type(int(j())) is int) value = 42.0 raises(TypeError, int, j()) value = "foo" @@ -473,7 +473,7 @@ return True n = int(TruncReturnsNonInt()) assert n == 1 - assert type(n) is bool + assert type(n) is int def test_int_before_string(self): class Integral(str): @@ -639,7 +639,7 @@ warnings.simplefilter("always", DeprecationWarning) n = int(bad) m = 
_operator.index(bad) - assert n is True + assert n == 1 and type(n) is int assert m is False assert len(log) == 2 From pypy.commits at gmail.com Sun Jul 16 10:09:18 2017 From: pypy.commits at gmail.com (rmariano) Date: Sun, 16 Jul 2017 07:09:18 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Add test for CPython for atrow().throw Message-ID: <596b738e.8c99df0a.16f83.0c20@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91905:1bf8606b71de Date: 2017-07-16 16:08 +0200 http://bitbucket.org/pypy/pypy/changeset/1bf8606b71de/ Log: Add test for CPython for atrow().throw In this case CPython raises RuntimeError, similar as if it was a wrong parameter to send. diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -413,3 +413,12 @@ expected = ("can't send non-None value to a just-started coroutine", ) assert ex.value.args == expected """ + + def test_async_yield_athrow_throw(self): """ + async def ag(): + yield 42 + + ex = raises(RuntimeError, ag().athrow(ValueError).throw, LookupError) + expected = ("can't send non-None value to a just-started coroutine", ) + assert ex.value.args == expected + """ From pypy.commits at gmail.com Sun Jul 16 10:22:39 2017 From: pypy.commits at gmail.com (rlamy) Date: Sun, 16 Jul 2017 07:22:39 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: fix for 6159e89116af Message-ID: <596b76af.150e1c0a.536a8.f9ec@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91906:fe19ba9deaa2 Date: 2017-07-16 16:18 +0200 http://bitbucket.org/pypy/pypy/changeset/fe19ba9deaa2/ Log: fix for 6159e89116af diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -755,7 +755,7 @@ for i in seq: result.append(i) return result""") - return w_result.getdata() + return ''.join(w_result.getdata()) W_BytesObject.typedef = 
TypeDef( "bytes", None, None, "read", From pypy.commits at gmail.com Sun Jul 16 10:22:41 2017 From: pypy.commits at gmail.com (rlamy) Date: Sun, 16 Jul 2017 07:22:41 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Improve performance of bytearray.extend() Message-ID: <596b76b1.c2b81c0a.d55e6.239b@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91907:75b45f45de1b Date: 2017-07-16 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/75b45f45de1b/ Log: Improve performance of bytearray.extend() diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -748,8 +748,11 @@ if space.isinstance_w(w_source, space.w_unicode): raise oefmt(space.w_TypeError, "cannot convert a (unicode) str object to bytes") + return _from_byte_sequence(space, w_source) - # sequence of bytes + +def _from_byte_sequence(space, w_source): + # Split off in a separate function for the JIT's benefit w_result = space.appexec([w_source], """(seq): result = bytearray() for i in seq: From pypy.commits at gmail.com Sun Jul 16 10:23:30 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 16 Jul 2017 07:23:30 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Relax and implement the latest test Message-ID: <596b76e2.8292df0a.7c581.c6fd@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r91908:4146af845534 Date: 2017-07-16 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/4146af845534/ Log: Relax and implement the latest test diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -611,7 +611,11 @@ class AsyncGenABase(W_Root): - state = 0 + ST_INIT = 0 + ST_ITER = 1 + ST_CLOSED = 2 + + state = ST_INIT def __init__(self, async_gen): self.space = async_gen.space @@ -628,13 +632,13 @@ def descr_throw(self, w_type, w_val=None, w_tb=None): space = self.space - if self.state == 2: + if self.state == 
self.ST_CLOSED: raise OperationError(space.w_StopIteration, space.w_None) try: w_value = self.async_gen.throw(w_type, w_val, w_tb) return self.unwrap_value(w_value) except OperationError as e: - self.state = 2 + self.state = self.ST_CLOSED raise def descr_close(self): @@ -655,7 +659,7 @@ def do_send(self, w_arg_or_err): space = self.space - if self.state == 2: + if self.state == self.ST_CLOSED: raise OperationError(space.w_StopIteration, space.w_None) # We think that the code should look like this: @@ -667,16 +671,16 @@ # But instead, CPython's logic is this, which we think is # giving nonsense results for 'g.asend(42).send(43)': - if self.state == 0: + if self.state == self.ST_INIT: if space.is_w(w_arg_or_err, space.w_None): w_arg_or_err = self.w_value_to_send - self.state = 1 + self.state = self.ST_ITER try: w_value = self.async_gen.send_ex(w_arg_or_err) return self.unwrap_value(w_value) except OperationError as e: - self.state = 2 + self.state = self.ST_CLOSED raise @@ -691,17 +695,32 @@ def do_send(self, w_arg_or_err): # XXX FAR MORE COMPLICATED IN CPYTHON space = self.space - if self.state == 2: + if self.state == self.ST_CLOSED: raise OperationError(space.w_StopIteration, space.w_None) - if self.state == 0: + if self.state == self.ST_INIT: if not space.is_w(w_arg_or_err, space.w_None): raise OperationError(space.w_RuntimeError, space.newtext( "can't send non-None value to a just-started coroutine")) - self.state = 1 - w_value = self.async_gen.throw(self.w_exc_type, - self.w_exc_value, - self.w_exc_tb) + self.state = self.ST_ITER + throwing = True else: - w_value = self.async_gen.send_ex(w_arg_or_err) - return self.unwrap_value(w_value) + throwing = False + + try: + if throwing: + w_value = self.async_gen.throw(self.w_exc_type, + self.w_exc_value, + self.w_exc_tb) + else: + w_value = self.async_gen.send_ex(w_arg_or_err) + return self.unwrap_value(w_value) + except OperationError as e: + self.state = self.ST_CLOSED + raise + + def descr_throw(self, w_type, 
w_val=None, w_tb=None): + if self.state == self.ST_INIT: + raise OperationError(self.space.w_RuntimeError, + self.space.newtext("can't do async_generator.athrow().throw()")) + return AsyncGenABase.descr_throw(self, w_type, w_val, w_tb) diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -418,7 +418,6 @@ async def ag(): yield 42 - ex = raises(RuntimeError, ag().athrow(ValueError).throw, LookupError) - expected = ("can't send non-None value to a just-started coroutine", ) - assert ex.value.args == expected + raises(RuntimeError, ag().athrow(ValueError).throw, LookupError) + # CPython's message makes little sense; PyPy's message is different """ From pypy.commits at gmail.com Sun Jul 16 11:01:02 2017 From: pypy.commits at gmail.com (rmariano) Date: Sun, 16 Jul 2017 08:01:02 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Add test for async generator fail while running Message-ID: <596b7fae.4492df0a.dfc25.4d3b@mx.google.com> Author: Mariano Anaya Branch: py3.6 Changeset: r91909:b6e947786fa5 Date: 2017-07-16 17:00 +0200 http://bitbucket.org/pypy/pypy/changeset/b6e947786fa5/ Log: Add test for async generator fail while running Check when an exception is thrown by calling `.athrow()` while on the middle of the execution. 
diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -421,3 +421,30 @@ raises(RuntimeError, ag().athrow(ValueError).throw, LookupError) # CPython's message makes little sense; PyPy's message is different """ + + def test_async_yield_athrow_while_running(self): """ + values = [] + async def ag(): + try: + received = yield 1 + except ValueError: + values.append(42) + return + yield 2 + + + async def run(): + running = ag() + x = await running.asend(None) + assert x == 1 + try: + await running.athrow(ValueError) + except StopAsyncIteration: + pass + + + try: + run().send(None) + except StopIteration: + assert values == [42] + """ From pypy.commits at gmail.com Sun Jul 16 12:02:33 2017 From: pypy.commits at gmail.com (antocuni) Date: Sun, 16 Jul 2017 09:02:33 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-jit: remove cpyext from the JIT policy blacklist Message-ID: <596b8e19.a29adf0a.2581e.dc3a@mx.google.com> Author: Antonio Cuni Branch: cpyext-jit Changeset: r91911:ae94f6ae3278 Date: 2017-07-16 14:54 +0100 http://bitbucket.org/pypy/pypy/changeset/ae94f6ae3278/ Log: remove cpyext from the JIT policy blacklist diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -14,7 +14,7 @@ return True if '.' 
in modname: modname, rest = modname.split('.', 1) - if modname in ['unicodedata', 'gc', '_minimal_curses', 'cpyext']: + if modname in ['unicodedata', 'gc', '_minimal_curses']: return False else: rest = '' From pypy.commits at gmail.com Sun Jul 16 12:02:35 2017 From: pypy.commits at gmail.com (antocuni) Date: Sun, 16 Jul 2017 09:02:35 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-jit: add enough @jit.dont_look_inside so that pypy translates Message-ID: <596b8e1b.55131c0a.edfc5.8df1@mx.google.com> Author: Antonio Cuni Branch: cpyext-jit Changeset: r91912:aee286949d64 Date: 2017-07-16 13:06 +0100 http://bitbucket.org/pypy/pypy/changeset/aee286949d64/ Log: add enough @jit.dont_look_inside so that pypy translates diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -1,4 +1,5 @@ from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib import jit from pypy.interpreter.error import oefmt from pypy.interpreter.module import Module @@ -15,7 +16,7 @@ INITFUNCPTR = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) - + at jit.dont_look_inside def load_cffi1_module(space, name, path, initptr): # This is called from pypy.module.cpyext.api.load_extension_module() from pypy.module._cffi_backend.call_python import get_ll_cffi_call_python diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -443,6 +443,11 @@ if func.__name__ in FUNCTIONS_BY_HEADER[header]: raise ValueError("%s already registered" % func.__name__) func._always_inline_ = 'try' + # + # XXX: should we @jit.dont_look_inside all the @cpython_api functions, + # or we should only disable some of them? 
+ func._jit_look_inside_ = False + # api_function = ApiFunction( argtypes, restype, func, error=_compute_error(error, restype), gil=gil, diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -1,4 +1,5 @@ from rpython.rtyper.lltypesystem import lltype, rffi, llmemory +from rpython.rlib import jit from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt @@ -186,6 +187,7 @@ (self.method_name, self.w_objclass.name)) + at jit.dont_look_inside def cwrapper_descr_call(space, w_self, __args__): self = space.interp_w(W_PyCWrapperObject, w_self) args_w, kw_w = __args__.unpack() @@ -197,6 +199,7 @@ return self.call(space, w_self, w_args, w_kw) + at jit.dont_look_inside def cfunction_descr_call(space, w_self, __args__): self = space.interp_w(W_PyCFunctionObject, w_self) args_w, kw_w = __args__.unpack() @@ -207,6 +210,7 @@ ret = self.call(space, None, w_args, w_kw) return ret + at jit.dont_look_inside def cmethod_descr_call(space, w_self, __args__): self = space.interp_w(W_PyCFunctionObject, w_self) args_w, kw_w = __args__.unpack() diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -14,7 +14,7 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.annlowlevel import llhelper -from rpython.rlib import rawrefcount +from rpython.rlib import rawrefcount, jit from rpython.rlib.debug import fatalerror @@ -151,6 +151,7 @@ class InvalidPointerException(Exception): pass + at jit.dont_look_inside def create_ref(space, w_obj, w_userdata=None): """ Allocates a PyObject, and fills its fields with info from the given @@ -190,6 +191,7 @@ w_marker_deallocating = W_Root() + at jit.dont_look_inside def from_ref(space, ref): """ Finds the interpreter object 
corresponding to the given reference. If the @@ -227,6 +229,8 @@ assert isinstance(w_type, W_TypeObject) return get_typedescr(w_type.layout.typedef).realize(space, ref) + + at jit.dont_look_inside def as_pyobj(space, w_obj, w_userdata=None): """ Returns a 'PyObject *' representing the given intepreter object. From pypy.commits at gmail.com Sun Jul 16 12:02:31 2017 From: pypy.commits at gmail.com (antocuni) Date: Sun, 16 Jul 2017 09:02:31 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-jit: experimental branch in which to make cpyext visible to the JIT Message-ID: <596b8e17.d685df0a.743d8.7bfd@mx.google.com> Author: Antonio Cuni Branch: cpyext-jit Changeset: r91910:1327063282d7 Date: 2017-07-16 14:59 +0100 http://bitbucket.org/pypy/pypy/changeset/1327063282d7/ Log: experimental branch in which to make cpyext visible to the JIT From pypy.commits at gmail.com Sun Jul 16 12:02:37 2017 From: pypy.commits at gmail.com (antocuni) Date: Sun, 16 Jul 2017 09:02:37 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-jit: merge part of the cpyext-callopt branch, up to commit 9cbc8bd76297: this should simplify and speedup the call of simple functions/methods Message-ID: <596b8e1d.8292df0a.7c581.d738@mx.google.com> Author: Antonio Cuni Branch: cpyext-jit Changeset: r91913:25ba96ce5970 Date: 2017-07-16 15:20 +0100 http://bitbucket.org/pypy/pypy/changeset/25ba96ce5970/ Log: merge part of the cpyext-callopt branch, up to commit 9cbc8bd76297: this should simplify and speedup the call of simple functions/methods diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -43,8 +43,8 @@ from pypy.module.cpyext.object import _dealloc _dealloc(space, py_obj) - class W_PyCFunctionObject(W_Root): + # TODO create a slightly different class depending on the c_ml_flags def __init__(self, space, ml, w_self, w_module=None): self.ml = ml self.name = 
rffi.charp2str(rffi.cast(rffi.CCHARP,self.ml.c_ml_name)) @@ -57,7 +57,7 @@ w_self = self.w_self flags = rffi.cast(lltype.Signed, self.ml.c_ml_flags) flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST) - if space.is_true(w_kw) and not flags & METH_KEYWORDS: + if not flags & METH_KEYWORDS and space.is_true(w_kw): raise oefmt(space.w_TypeError, "%s() takes no keyword arguments", self.name) @@ -97,6 +97,20 @@ else: return space.w_None +class W_PyCFunctionObjectNoArgs(W_PyCFunctionObject): + def call(self, space, w_self, w_args, w_kw): + # Call the C function + if w_self is None: + w_self = self.w_self + func = self.ml.c_ml_meth + return generic_cpy_call(space, func, w_self, None) + +class W_PyCFunctionObjectSingleObject(W_PyCFunctionObject): + def call(self, space, w_self, w_o, w_kw): + if w_self is None: + w_self = self.w_self + func = self.ml.c_ml_meth + return generic_cpy_call(space, func, w_self, w_o) class W_PyCMethodObject(W_PyCFunctionObject): w_self = None @@ -198,11 +212,22 @@ space.setitem(w_kw, space.newtext(key), w_obj) return self.call(space, w_self, w_args, w_kw) +def cfunction_descr_call_noargs(space, w_self): + # special case for calling with flags METH_NOARGS + self = space.interp_w(W_PyCFunctionObjectNoArgs, w_self) + return self.call(space, None, None, None) + +def cfunction_descr_call_single_object(space, w_self, w_o): + # special case for calling with flags METH_O + self = space.interp_w(W_PyCFunctionObjectSingleObject, w_self) + return self.call(space, None, w_o, None) @jit.dont_look_inside def cfunction_descr_call(space, w_self, __args__): + # specialize depending on the W_PyCFunctionObject self = space.interp_w(W_PyCFunctionObject, w_self) args_w, kw_w = __args__.unpack() + # XXX __args__.unpack is slow w_args = space.newtuple(args_w) w_kw = space.newdict() for key, w_obj in kw_w.items(): @@ -247,6 +272,26 @@ ) W_PyCFunctionObject.typedef.acceptable_as_base_class = False +W_PyCFunctionObjectNoArgs.typedef = TypeDef( + 
'builtin_function_or_method', W_PyCFunctionObject.typedef, + __call__ = interp2app(cfunction_descr_call_noargs), + __doc__ = GetSetProperty(W_PyCFunctionObjectNoArgs.get_doc), + __module__ = interp_attrproperty_w('w_module', cls=W_PyCFunctionObjectNoArgs), + __name__ = interp_attrproperty('name', cls=W_PyCFunctionObjectNoArgs, + wrapfn="newtext_or_none"), + ) +W_PyCFunctionObjectNoArgs.typedef.acceptable_as_base_class = False + +W_PyCFunctionObjectSingleObject.typedef = TypeDef( + 'builtin_function_or_method', W_PyCFunctionObject.typedef, + __call__ = interp2app(cfunction_descr_call_single_object), + __doc__ = GetSetProperty(W_PyCFunctionObjectSingleObject.get_doc), + __module__ = interp_attrproperty_w('w_module', cls=W_PyCFunctionObjectSingleObject), + __name__ = interp_attrproperty('name', cls=W_PyCFunctionObjectSingleObject, + wrapfn="newtext_or_none"), + ) +W_PyCFunctionObjectSingleObject.typedef.acceptable_as_base_class = False + W_PyCMethodObject.typedef = TypeDef( 'method', __get__ = interp2app(cmethod_descr_get), diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py --- a/pypy/module/cpyext/modsupport.py +++ b/pypy/module/cpyext/modsupport.py @@ -1,11 +1,13 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, cpython_struct, \ - METH_STATIC, METH_CLASS, METH_COEXIST, CANNOT_FAIL, CONST_STRING + METH_STATIC, METH_CLASS, METH_COEXIST, CANNOT_FAIL, CONST_STRING, \ + METH_NOARGS, METH_O from pypy.module.cpyext.pyobject import PyObject, as_pyobj from pypy.interpreter.module import Module from pypy.module.cpyext.methodobject import ( W_PyCFunctionObject, PyCFunction_NewEx, PyDescr_NewMethod, - PyMethodDef, PyDescr_NewClassMethod, PyStaticMethod_New) + PyMethodDef, PyDescr_NewClassMethod, PyStaticMethod_New, + W_PyCFunctionObjectNoArgs, W_PyCFunctionObjectSingleObject) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.module.cpyext.state import State from 
pypy.interpreter.error import oefmt @@ -79,6 +81,13 @@ space.newtext(rffi.charp2str(doc))) return w_mod # borrowed result kept alive in PyImport_AddModule() +def _create_pyc_function_object(space, method, w_self, w_name, flags): + flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST) + if flags == METH_NOARGS: + return W_PyCFunctionObjectNoArgs(space, method, w_self, w_name) + if flags == METH_O: + return W_PyCFunctionObjectSingleObject(space, method, w_self, w_name) + return W_PyCFunctionObject(space, method, w_self, w_name) def convert_method_defs(space, dict_w, methods, w_type, w_self=None, name=None): w_name = space.newtext_or_none(name) @@ -98,7 +107,8 @@ raise oefmt(space.w_ValueError, "module functions cannot set METH_CLASS or " "METH_STATIC") - w_obj = W_PyCFunctionObject(space, method, w_self, w_name) + w_obj = _create_pyc_function_object(space, method, w_self, + w_name, flags) else: if methodname in dict_w and not (flags & METH_COEXIST): continue From pypy.commits at gmail.com Mon Jul 17 05:22:47 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:22:47 -0700 (PDT) Subject: [pypy-commit] stmgc c8-efficient-serial-execution-master: Fix syntax error Message-ID: <596c81e7.100a1c0a.52de.61a5@mx.google.com> Author: Tobias Weber Branch: c8-efficient-serial-execution-master Changeset: r2109:88ffffe69ad8 Date: 2017-07-13 22:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/88ffffe69ad8/ Log: Fix syntax error diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -500,7 +500,7 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; - assert((pseg->transaction_state == TS_INEVITABLE) || !pseg->commit_if_not_atomic) + assert((pseg->transaction_state == TS_INEVITABLE) || !pseg->commit_if_not_atomic); if (pseg->commit_if_not_atomic && pseg->transaction_state == TS_INEVITABLE && pseg->pub.running_thread->self_or_0_if_atomic != 0) { From pypy.commits at 
gmail.com Mon Jul 17 05:22:43 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:22:43 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Update from efficient serial execution master with starvation fix Message-ID: <596c81e3.17addf0a.3c158.8e36@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2107:3868dfdf70fd Date: 2017-07-12 19:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/3868dfdf70fd/ Log: Update from efficient serial execution master with starvation fix diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1636,7 +1636,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = 0; + int num_waits = 1; timing_become_inevitable(); @@ -1647,42 +1647,42 @@ if (msg != MSG_INEV_DONT_SLEEP) { dprintf(("become_inevitable: %s\n", msg)); - if (any_soon_finished_or_inevitable_thread_segment() && - num_waits <= NB_SEGMENTS) { + if (any_soon_finished_or_inevitable_thread_segment()) { #if STM_TESTS /* for tests: another transaction */ stm_abort_transaction(); /* is already inevitable, abort */ #endif + signal_commit_to_inevitable_transaction(); + s_mutex_lock(); if (any_soon_finished_or_inevitable_thread_segment() && - !safe_point_requested()) { - - signal_commit_to_inevitable_transaction(); + !safe_point_requested() && + num_waits <= NB_SEGMENTS) { /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); - if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { - s_mutex_unlock(); - /* try to detach another inevitable transaction, but - only after waiting a bit. This is necessary to avoid - deadlocks in some situations, which are hopefully - not too common. We don't want two threads constantly - detaching each other. 
*/ - intptr_t detached = fetch_detached_transaction(); - if (detached != 0) { - EMIT_WAIT_DONE(); - commit_fetched_detached_transaction(detached); - } - goto retry_from_start; + if (cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { + num_waits++; } - num_waits++; } s_mutex_unlock(); + /* XXX try to detach another inevitable transaction, but + only after waiting a bit. This is necessary to avoid + deadlocks in some situations, which are hopefully + not too common. We don't want two threads constantly + detaching each other. */ + intptr_t detached = fetch_detached_transaction(); + if (detached != 0) { + EMIT_WAIT_DONE(); + commit_fetched_detached_transaction(detached); + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); + } goto retry_from_start; } else { EMIT_WAIT_DONE(); if (!_validate_and_turn_inevitable()) { + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); goto retry_from_start; } } diff --git a/c8/stm/detach.c b/c8/stm/detach.c --- a/c8/stm/detach.c +++ b/c8/stm/detach.c @@ -229,14 +229,18 @@ return STM_PSEGMENT->atomic_nesting_levels; } +// max intptr_t value is 7FFFFFFFFFFFFFFF on 64-bit => larger than 2 * huge value #define HUGE_INTPTR_VALUE 0x3000000000000000L void stm_enable_atomic(stm_thread_local_t *tl) { if (!stm_is_atomic(tl)) { + // do for outermost atomic block only tl->self_or_0_if_atomic = 0; /* increment 'nursery_mark' by HUGE_INTPTR_VALUE, so that - stm_should_break_transaction() returns always false */ + stm_should_break_transaction() returns always false. 
+ preserves the previous nursery_mark, unless it is < 0 + or >= huge value */ intptr_t mark = (intptr_t)STM_SEGMENT->nursery_mark; if (mark < 0) mark = 0; @@ -256,6 +260,7 @@ STM_PSEGMENT->atomic_nesting_levels--; if (STM_PSEGMENT->atomic_nesting_levels == 0) { + // revert changes by stm_enable_atomic only if we left the outermost atomic block tl->self_or_0_if_atomic = (intptr_t)tl; /* decrement 'nursery_mark' by HUGE_INTPTR_VALUE, to cancel what was done in stm_enable_atomic() */ From pypy.commits at gmail.com Mon Jul 17 05:22:45 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:22:45 -0700 (PDT) Subject: [pypy-commit] stmgc c8-efficient-serial-execution-master: Fix commit signal was not reset on transaction start Message-ID: <596c81e5.4a90df0a.afcbf.ea19@mx.google.com> Author: Tobias Weber Branch: c8-efficient-serial-execution-master Changeset: r2108:b5c1dadb9699 Date: 2017-07-13 21:40 +0200 http://bitbucket.org/pypy/stmgc/changeset/b5c1dadb9699/ Log: Fix commit signal was not reset on transaction start diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1116,6 +1116,7 @@ } _do_start_transaction(tl); + STM_PSEGMENT->commit_if_not_atomic = false; if (repeat_count == 0) { /* else, 'nursery_mark' was already set in abort_data_structures_from_segment_num() */ STM_SEGMENT->nursery_mark = ((stm_char *)_stm_nursery_start + diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -500,8 +500,9 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; + assert((pseg->transaction_state == TS_INEVITABLE) || !pseg->commit_if_not_atomic) if (pseg->commit_if_not_atomic - // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? 
+ && pseg->transaction_state == TS_INEVITABLE && pseg->pub.running_thread->self_or_0_if_atomic != 0) { // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; From pypy.commits at gmail.com Mon Jul 17 05:22:52 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:22:52 -0700 (PDT) Subject: [pypy-commit] stmgc c8-overheads-instrumentation: Instrument more of the major GC Message-ID: <596c81ec.a494df0a.8bcf5.9570@mx.google.com> Author: Tobias Weber Branch: c8-overheads-instrumentation Changeset: r2112:00fa92be3c0f Date: 2017-07-14 17:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/00fa92be3c0f/ Log: Instrument more of the major GC diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -732,16 +732,21 @@ static void major_do_validation_and_minor_collections(void) { + start_timer(); + int original_num = STM_SEGMENT->segment_num; long i; assert(_has_mutex()); /* including the sharing seg0 */ - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 0; i < NB_SEGMENTS; i++) { // TODO why is this strictly smaller than? ensure_gs_register(i); + pause_timer(); bool ok = _stm_validate(); + continue_timer(); + assert(get_priv_segment(i)->last_commit_log_entry->next == NULL || get_priv_segment(i)->last_commit_log_entry->next == INEV_RUNNING); if (!ok) { @@ -776,7 +781,9 @@ Collecting might fail due to invalid state. 
*/ if (!must_abort()) { + pause_timer(); _do_minor_collection(/*commit=*/ false); + continue_timer(); assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); } else { @@ -786,6 +793,8 @@ } ensure_gs_register(original_num); + + stop_timer_and_publish(STM_DURATION_MAJOR_GC_FULL); } From pypy.commits at gmail.com Mon Jul 17 05:22:54 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:22:54 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Merge fix of commit signalling fix Message-ID: <596c81ee.1db7df0a.e9b9b.94c7@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2113:003bd8a738d7 Date: 2017-07-14 17:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/003bd8a738d7/ Log: Merge fix of commit signalling fix diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1178,6 +1178,7 @@ _do_start_transaction(tl); continue_timer(); + STM_PSEGMENT->commit_if_not_atomic = false; STM_SEGMENT->nursery_mark = ((stm_char *)_stm_nursery_start + stm_get_transaction_length(tl)); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -549,8 +549,9 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; + assert((pseg->transaction_state == TS_INEVITABLE) || !pseg->commit_if_not_atomic); if (pseg->commit_if_not_atomic - // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? 
+ && pseg->transaction_state == TS_INEVITABLE && pseg->pub.running_thread->self_or_0_if_atomic != 0) { // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; From pypy.commits at gmail.com Mon Jul 17 05:22:49 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:22:49 -0700 (PDT) Subject: [pypy-commit] stmgc c8-overheads-instrumentation: Merge latest changes from master Message-ID: <596c81e9.a198df0a.d2dda.8f1a@mx.google.com> Author: Tobias Weber Branch: c8-overheads-instrumentation Changeset: r2110:683f252182e3 Date: 2017-07-14 17:41 +0200 http://bitbucket.org/pypy/stmgc/changeset/683f252182e3/ Log: Merge latest changes from master diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -379,6 +379,14 @@ static void readd_wb_executed_flags(void); static void check_all_write_barrier_flags(char *segbase, struct list_s *list); +static void signal_commit_to_inevitable_transaction(void) { + struct stm_priv_segment_info_s* inevitable_segement = get_inevitable_thread_segment(); + if (inevitable_segement != 0) { + // the inevitable thread is still running: set its "please commit" flag (is ignored by the inevitable thread if it is atomic) + inevitable_segement->commit_if_not_atomic = true; + } +} + static void wait_for_inevitable(void) { intptr_t detached = 0; @@ -395,6 +403,8 @@ try to detach an inevitable transaction regularly */ detached = fetch_detached_transaction(); if (detached == 0) { + // the inevitable trx was not detached or it was detached but is atomic + signal_commit_to_inevitable_transaction(); EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) goto wait_some_more; @@ -1168,6 +1178,7 @@ _do_start_transaction(tl); continue_timer(); + STM_PSEGMENT->commit_if_not_atomic = false; if (repeat_count == 0) { /* else, 'nursery_mark' was already set in 
abort_data_structures_from_segment_num() */ STM_SEGMENT->nursery_mark = ((stm_char *)_stm_nursery_start + @@ -1641,7 +1652,7 @@ void _stm_become_inevitable(const char *msg) { - int num_waits = 0; + int num_waits = 1; timing_become_inevitable(); @@ -1652,50 +1663,48 @@ if (msg != MSG_INEV_DONT_SLEEP) { dprintf(("become_inevitable: %s\n", msg)); - if (any_soon_finished_or_inevitable_thread_segment() && - num_waits <= NB_SEGMENTS) { + if (any_soon_finished_or_inevitable_thread_segment()) { #if STM_TESTS /* for tests: another transaction */ stm_abort_transaction(); /* is already inevitable, abort */ #endif - bool timed_out = false; + signal_commit_to_inevitable_transaction(); s_mutex_lock(); if (any_soon_finished_or_inevitable_thread_segment() && - !safe_point_requested()) { + !safe_point_requested() && + num_waits <= NB_SEGMENTS) { /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); - if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, - 0.000054321)) - timed_out = true; + if (cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) { + num_waits++; + } } s_mutex_unlock(); - - if (timed_out) { - /* try to detach another inevitable transaction, but - only after waiting a bit. This is necessary to avoid - deadlocks in some situations, which are hopefully - not too common. We don't want two threads constantly - detaching each other. */ - intptr_t detached = fetch_detached_transaction(); - if (detached != 0) { - EMIT_WAIT_DONE(); - commit_fetched_detached_transaction(detached); - } - } - else { - num_waits++; + /* XXX try to detach another inevitable transaction, but + only after waiting a bit. This is necessary to avoid + deadlocks in some situations, which are hopefully + not too common. We don't want two threads constantly + detaching each other. 
*/ + intptr_t detached = fetch_detached_transaction(); + if (detached != 0) { + EMIT_WAIT_DONE(); + commit_fetched_detached_transaction(detached); + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); } goto retry_from_start; } - EMIT_WAIT_DONE(); - if (!_validate_and_turn_inevitable()) - goto retry_from_start; + else { + EMIT_WAIT_DONE(); + if (!_validate_and_turn_inevitable()) { + EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); + goto retry_from_start; + } + } } - else { - if (!_validate_and_turn_inevitable()) - return; + else if (!_validate_and_turn_inevitable()) { + return; } /* There may be a concurrent commit of a detached Tx going on. @@ -1707,6 +1716,7 @@ stm_spin_loop(); assert(_stm_detached_inevitable_from_thread == 0); + STM_PSEGMENT->commit_if_not_atomic = false; soon_finished_or_inevitable_thread_segment(); STM_PSEGMENT->transaction_state = TS_INEVITABLE; diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -169,6 +169,9 @@ /* For stm_enable_atomic() */ uintptr_t atomic_nesting_levels; + + // TODO signal flag that is checked in throw_away_nursery() for making immediate commit + bool commit_if_not_atomic; }; enum /* safe_point */ { diff --git a/c8/stm/detach.c b/c8/stm/detach.c --- a/c8/stm/detach.c +++ b/c8/stm/detach.c @@ -215,6 +215,7 @@ } } +// TODO write tests, verify is working, verify no overflows with adaptive mode uintptr_t stm_is_atomic(stm_thread_local_t *tl) { assert(STM_SEGMENT->running_thread == tl); @@ -228,14 +229,18 @@ return STM_PSEGMENT->atomic_nesting_levels; } +// max intptr_t value is 7FFFFFFFFFFFFFFF on 64-bit => larger than 2 * huge value #define HUGE_INTPTR_VALUE 0x3000000000000000L void stm_enable_atomic(stm_thread_local_t *tl) { if (!stm_is_atomic(tl)) { + // do for outermost atomic block only tl->self_or_0_if_atomic = 0; /* increment 'nursery_mark' by HUGE_INTPTR_VALUE, so that - stm_should_break_transaction() returns always false */ + stm_should_break_transaction() returns always false. 
+ preserves the previous nursery_mark, unless it is < 0 + or >= huge value */ intptr_t mark = (intptr_t)STM_SEGMENT->nursery_mark; if (mark < 0) mark = 0; @@ -255,6 +260,7 @@ STM_PSEGMENT->atomic_nesting_levels--; if (STM_PSEGMENT->atomic_nesting_levels == 0) { + // revert changes by stm_enable_atomic only if we left the outermost atomic block tl->self_or_0_if_atomic = (intptr_t)tl; /* decrement 'nursery_mark' by HUGE_INTPTR_VALUE, to cancel what was done in stm_enable_atomic() */ diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -500,6 +500,14 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; + assert((pseg->transaction_state == TS_INEVITABLE) || !pseg->commit_if_not_atomic); + if (pseg->commit_if_not_atomic + && pseg->transaction_state == TS_INEVITABLE + && pseg->pub.running_thread->self_or_0_if_atomic != 0) { + // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately + pseg->pub.nursery_mark = 0; + } + /* free any object left from 'young_outside_nursery' */ if (!tree_is_cleared(pseg->young_outside_nursery)) { wlog_t *item; diff --git a/c8/stm/sync.c b/c8/stm/sync.c --- a/c8/stm/sync.c +++ b/c8/stm/sync.c @@ -293,6 +293,19 @@ return false; } +static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void) +{ + struct stm_priv_segment_info_s* segment; + int num; + for (num = 1; num < NB_SEGMENTS; num++) { + segment = get_priv_segment(num); + if (segment->transaction_state == TS_INEVITABLE) { + return segment; + } + } + return 0; +} + __attribute__((unused)) static bool _seems_to_be_running_transaction(void) { diff --git a/c8/stm/sync.h b/c8/stm/sync.h --- a/c8/stm/sync.h +++ b/c8/stm/sync.h @@ -29,6 +29,7 @@ static void release_thread_segment(stm_thread_local_t *tl); static void soon_finished_or_inevitable_thread_segment(void); static bool any_soon_finished_or_inevitable_thread_segment(void); +static 
struct stm_priv_segment_info_s* get_inevitable_thread_segment(void); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, From pypy.commits at gmail.com Mon Jul 17 05:22:50 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:22:50 -0700 (PDT) Subject: [pypy-commit] stmgc c8-overheads-instrumentation: Fix some timings that could be lost when aborting by publishing and resetting timer eagerly Message-ID: <596c81ea.a1abdf0a.72ac4.1806@mx.google.com> Author: Tobias Weber Branch: c8-overheads-instrumentation Changeset: r2111:364f9fb71d3e Date: 2017-07-14 17:43 +0200 http://bitbucket.org/pypy/stmgc/changeset/364f9fb71d3e/ Log: Fix some timings that could be lost when aborting by publishing and resetting timer eagerly diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -468,8 +468,8 @@ #endif if (STM_PSEGMENT->last_commit_log_entry->next == INEV_RUNNING) { - pause_timer(); - wait_for_inevitable(); // TODO may abort!! timing event lost + stop_timer_and_publish(STM_DURATION_VALIDATION); + wait_for_inevitable(); continue_timer(); goto retry_from_start; /* redo _stm_validate() now */ } @@ -559,14 +559,13 @@ OPT_ASSERT(yes); release_modification_lock_wr(STM_SEGMENT->segment_num); + + stop_timer_and_publish(STM_DURATION_VALIDATION); } else { - pause_timer(); + stop_timer_and_publish(STM_DURATION_VALIDATION); _validate_and_attach(new); - continue_timer(); } - - stop_timer_and_publish(STM_DURATION_VALIDATION); } /* ############# STM ############# */ @@ -1330,7 +1329,9 @@ if there is an inevitable tx running) */ bool was_inev = STM_PSEGMENT->transaction_state == TS_INEVITABLE; - pause_timer(); + // publish here because the validation may abort + stop_timer_and_publish_for_thread( + thread_local_for_logging, STM_DURATION_COMMIT_EXCEPT_GC); _validate_and_add_to_commit_log(); continue_timer(); @@ -1671,9 +1672,9 @@ signal_commit_to_inevitable_transaction(); s_mutex_lock(); - if (any_soon_finished_or_inevitable_thread_segment() && 
- !safe_point_requested() && - num_waits <= NB_SEGMENTS) { + if (any_soon_finished_or_inevitable_thread_segment() + && !safe_point_requested() + && num_waits <= NB_SEGMENTS) { /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); diff --git a/c8/stm/timing.h b/c8/stm/timing.h --- a/c8/stm/timing.h +++ b/c8/stm/timing.h @@ -27,6 +27,8 @@ #define pause_timer() clock_gettime(CLOCK_MONOTONIC_RAW, &stop); \ get_duration() +#define reset_timer() duration.tv_sec = 0; duration.tv_nsec = 0; + #define stm_duration_payload(duration_data) \ stm_timing_event_payload_data_t stm_duration_data = \ { .duration = &(duration_data) }; \ @@ -42,7 +44,8 @@ pause_timer() \ stm_duration_payload(duration) \ assert((thread_local) != NULL); \ - publish_event((thread_local), (event)) + publish_event((thread_local), (event)) \ + reset_timer() #define stop_timer_and_publish(event) \ stop_timer_and_publish_for_thread(STM_SEGMENT->running_thread, (event)) From pypy.commits at gmail.com Mon Jul 17 05:22:56 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:22:56 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Move transaction length update on abort Message-ID: <596c81f0.89e51c0a.6c323.6ced@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2114:7b939c8fa0b0 Date: 2017-07-14 12:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/7b939c8fa0b0/ Log: Move transaction length update on abort diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1551,8 +1551,6 @@ did_abort = 1; #endif - stm_transaction_length_handle_validation(pseg->pub.running_thread, true); - list_clear(pseg->objects_pointing_to_nursery); list_clear(pseg->old_objects_with_cards_set); LIST_FOREACH_R(pseg->large_overflow_objects, uintptr_t /*item*/, @@ -1583,6 +1581,8 @@ tl->self_or_0_if_atomic = (intptr_t)tl; /* clear the 'atomic' flag */ STM_PSEGMENT->atomic_nesting_levels = 0; + 
stm_transaction_length_handle_validation(tl, true); + if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); if (tl->mem_reset_on_abort) { From pypy.commits at gmail.com Mon Jul 17 05:23:10 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:10 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Merge fix for stop timer macro Message-ID: <596c81fe.8c99df0a.16f83.974b@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2117:93461fbaabd1 Date: 2017-07-14 19:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/93461fbaabd1/ Log: Merge fix for stop timer macro diff --git a/c8/stm/timing.h b/c8/stm/timing.h --- a/c8/stm/timing.h +++ b/c8/stm/timing.h @@ -8,6 +8,8 @@ #define start_timer() struct timespec start, stop; \ struct timespec duration = { .tv_sec = 0, .tv_nsec = 0 };\ uint32_t nanosec_diff, sec_diff; \ + stm_timing_event_payload_data_t stm_duration_data; \ + stm_timing_event_payload_t stm_duration_payload; \ continue_timer() /* Must use start_timer before using this macro. */ @@ -30,10 +32,9 @@ #define reset_timer() duration.tv_sec = 0; duration.tv_nsec = 0; #define stm_duration_payload(duration_data) \ - stm_timing_event_payload_data_t stm_duration_data = \ - { .duration = &(duration_data) }; \ - stm_timing_event_payload_t stm_duration_payload = \ - { STM_EVENT_PAYLOAD_DURATION, stm_duration_data }; + stm_duration_data.duration = &(duration_data); \ + stm_duration_payload.type = STM_EVENT_PAYLOAD_DURATION; \ + stm_duration_payload.data = stm_duration_data; #define publish_event(thread_local, event) \ (timing_enabled() ? 
\ From pypy.commits at gmail.com Mon Jul 17 05:23:12 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:12 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Fix missing type definitions for custom payload Message-ID: <596c8200.1db7df0a.e9b9b.94d6@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2118:e912698faa40 Date: 2017-07-14 20:27 +0200 http://bitbucket.org/pypy/stmgc/changeset/e912698faa40/ Log: Fix missing type definitions for custom payload diff --git a/c8/stm/timing.h b/c8/stm/timing.h --- a/c8/stm/timing.h +++ b/c8/stm/timing.h @@ -1,5 +1,9 @@ #include +#define define_payload_types() \ + stm_timing_event_payload_data_t stm_duration_data; \ + stm_timing_event_payload_t stm_duration_payload; + #define continue_timer() clock_gettime(CLOCK_MONOTONIC_RAW, &start); /* Use raw monotonic time, i.e., solely based on local hardware (no NTP @@ -8,8 +12,7 @@ #define start_timer() struct timespec start, stop; \ struct timespec duration = { .tv_sec = 0, .tv_nsec = 0 };\ uint32_t nanosec_diff, sec_diff; \ - stm_timing_event_payload_data_t stm_duration_data; \ - stm_timing_event_payload_t stm_duration_payload; \ + define_payload_types() \ continue_timer() /* Must use start_timer before using this macro. 
*/ @@ -59,5 +62,6 @@ #define publish_custom_value_event(double_value, event) \ set_payload((double_value)) \ + define_payload_types() \ stm_duration_payload(payload_value); \ publish_event(STM_SEGMENT->running_thread, (event)) From pypy.commits at gmail.com Mon Jul 17 05:22:57 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:22:57 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Merge latest instrumentation fixes Message-ID: <596c81f1.925b1c0a.e7e6f.456c@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2115:47ae8bbc2b7e Date: 2017-07-14 18:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/47ae8bbc2b7e/ Log: Merge latest instrumentation fixes diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -468,8 +468,8 @@ #endif if (STM_PSEGMENT->last_commit_log_entry->next == INEV_RUNNING) { - pause_timer(); - wait_for_inevitable(); // TODO may abort!! timing event lost + stop_timer_and_publish(STM_DURATION_VALIDATION); + wait_for_inevitable(); continue_timer(); goto retry_from_start; /* redo _stm_validate() now */ } @@ -559,14 +559,13 @@ OPT_ASSERT(yes); release_modification_lock_wr(STM_SEGMENT->segment_num); + + stop_timer_and_publish(STM_DURATION_VALIDATION); } else { - pause_timer(); + stop_timer_and_publish(STM_DURATION_VALIDATION); _validate_and_attach(new); - continue_timer(); } - - stop_timer_and_publish(STM_DURATION_VALIDATION); } /* ############# STM ############# */ @@ -1327,7 +1326,9 @@ if there is an inevitable tx running) */ bool was_inev = STM_PSEGMENT->transaction_state == TS_INEVITABLE; - pause_timer(); + // publish here because the validation may abort + stop_timer_and_publish_for_thread( + thread_local_for_logging, STM_DURATION_COMMIT_EXCEPT_GC); _validate_and_add_to_commit_log(); continue_timer(); @@ -1656,9 +1657,9 @@ signal_commit_to_inevitable_transaction(); s_mutex_lock(); - if (any_soon_finished_or_inevitable_thread_segment() && - 
!safe_point_requested() && - num_waits <= NB_SEGMENTS) { + if (any_soon_finished_or_inevitable_thread_segment() + && !safe_point_requested() + && num_waits <= NB_SEGMENTS) { /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -781,16 +781,21 @@ static void major_do_validation_and_minor_collections(void) { + start_timer(); + int original_num = STM_SEGMENT->segment_num; long i; assert(_has_mutex()); /* including the sharing seg0 */ - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 0; i < NB_SEGMENTS; i++) { // TODO why is this strictly smaller than? ensure_gs_register(i); + pause_timer(); bool ok = _stm_validate(); + continue_timer(); + assert(get_priv_segment(i)->last_commit_log_entry->next == NULL || get_priv_segment(i)->last_commit_log_entry->next == INEV_RUNNING); if (!ok) { @@ -825,7 +830,9 @@ Collecting might fail due to invalid state. */ if (!must_abort()) { + pause_timer(); _do_minor_collection(/*commit=*/ false); + continue_timer(); assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); } else { @@ -835,6 +842,8 @@ } ensure_gs_register(original_num); + + stop_timer_and_publish(STM_DURATION_MAJOR_GC_FULL); } diff --git a/c8/stm/timing.h b/c8/stm/timing.h --- a/c8/stm/timing.h +++ b/c8/stm/timing.h @@ -27,6 +27,8 @@ #define pause_timer() clock_gettime(CLOCK_MONOTONIC_RAW, &stop); \ get_duration() +#define reset_timer() duration.tv_sec = 0; duration.tv_nsec = 0; + #define stm_duration_payload(duration_data) \ stm_timing_event_payload_data_t stm_duration_data = \ { .duration = &(duration_data) }; \ @@ -42,7 +44,8 @@ pause_timer() \ stm_duration_payload(duration) \ assert((thread_local) != NULL); \ - publish_event((thread_local), (event)) + publish_event((thread_local), (event)) \ + reset_timer() #define stop_timer_and_publish(event) \ stop_timer_and_publish_for_thread(STM_SEGMENT->running_thread, (event)) From pypy.commits at 
gmail.com Mon Jul 17 05:23:08 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:08 -0700 (PDT) Subject: [pypy-commit] stmgc c8-overheads-instrumentation: Fix redefinition of payload data when using stop timer macro more than once in a function Message-ID: <596c81fc.0594df0a.4575a.cb68@mx.google.com> Author: Tobias Weber Branch: c8-overheads-instrumentation Changeset: r2116:59e0bec0fd9b Date: 2017-07-14 19:00 +0200 http://bitbucket.org/pypy/stmgc/changeset/59e0bec0fd9b/ Log: Fix redefinition of payload data when using stop timer macro more than once in a function diff --git a/c8/stm/timing.h b/c8/stm/timing.h --- a/c8/stm/timing.h +++ b/c8/stm/timing.h @@ -8,6 +8,8 @@ #define start_timer() struct timespec start, stop; \ struct timespec duration = { .tv_sec = 0, .tv_nsec = 0 };\ uint32_t nanosec_diff, sec_diff; \ + stm_timing_event_payload_data_t stm_duration_data; \ + stm_timing_event_payload_t stm_duration_payload; \ continue_timer() /* Must use start_timer before using this macro. */ @@ -30,10 +32,9 @@ #define reset_timer() duration.tv_sec = 0; duration.tv_nsec = 0; #define stm_duration_payload(duration_data) \ - stm_timing_event_payload_data_t stm_duration_data = \ - { .duration = &(duration_data) }; \ - stm_timing_event_payload_t stm_duration_payload = \ - { STM_EVENT_PAYLOAD_DURATION, stm_duration_data }; + stm_duration_data.duration = &(duration_data); \ + stm_duration_payload.type = STM_EVENT_PAYLOAD_DURATION; \ + stm_duration_payload.data = stm_duration_data; #define publish_event(thread_local, event) \ (timing_enabled() ? 
\ From pypy.commits at gmail.com Mon Jul 17 05:23:19 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:19 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Introduce slow start mechanism Message-ID: <596c8207.0594df0a.4575a.cb77@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2122:f6be3fc14929 Date: 2017-07-16 13:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/f6be3fc14929/ Log: Introduce slow start mechanism diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -49,6 +49,10 @@ if (previous < 1) { new = previous * multiplier; } + if (tl->linear_transaction_length_increment != 0) { + // thread had to abort before: slow start + set_backoff(tl, new); + } } else { // not abort and backoff != 0 // in backoff, linear increase up to 1 if (previous < 1) { From pypy.commits at gmail.com Mon Jul 17 05:23:14 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:14 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Move transaction length update for aborts to validation Message-ID: <596c8202.a1abdf0a.72ac4.1824@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2119:dbea548c4c6e Date: 2017-07-15 18:12 +0200 http://bitbucket.org/pypy/stmgc/changeset/dbea548c4c6e/ Log: Move transaction length update for aborts to validation diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -347,6 +347,9 @@ } if (thread_local_for_logging != NULL) { + if (needs_abort) { + stm_transaction_length_handle_validation(thread_local_for_logging, true); + } stop_timer_and_publish_for_thread( thread_local_for_logging, STM_DURATION_VALIDATION); } @@ -1582,8 +1585,6 @@ tl->self_or_0_if_atomic = (intptr_t)tl; /* clear the 'atomic' flag */ STM_PSEGMENT->atomic_nesting_levels = 0; - stm_transaction_length_handle_validation(tl, true); - if (tl->mem_clear_on_abort) 
memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); if (tl->mem_reset_on_abort) { From pypy.commits at gmail.com Mon Jul 17 05:23:15 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:15 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Fix and refactor backoff mechanism Message-ID: <596c8203.8f871c0a.86fc9.f8a9@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2120:adf35813ae8d Date: 2017-07-15 18:13 +0200 http://bitbucket.org/pypy/stmgc/changeset/adf35813ae8d/ Log: Fix and refactor backoff mechanism diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -23,8 +23,17 @@ // corresponds to ~700 bytes nursery fill #define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.00000001) +#define BACKOFF_MULTIPLIER (0.05 / STM_MIN_RELATIVE_TRANSACTION_LENGTH) -static double get_new_transaction_length(stm_thread_local_t *tl, bool aborts) { +static inline void set_backoff(stm_thread_local_t *tl, double rel_trx_len) { + // the shorter the trx, the more backoff: 100 at min trx length, proportional decrease to 5 at max trx length (think a/x + b = backoff) + tl->transaction_length_backoff = + (int)(1 / (BACKOFF_MULTIPLIER * rel_trx_len) + 5); + // printf("thread %d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); + tl->linear_transaction_length_increment = rel_trx_len; +} + +static inline double get_new_transaction_length(stm_thread_local_t *tl, bool aborts) { const int multiplier = 100; double previous = tl->relative_transaction_length; double new = previous; @@ -34,10 +43,7 @@ } else { new = STM_MIN_RELATIVE_TRANSACTION_LENGTH; } - // the shorter the trx, the more backoff: 1000 at min trx length, proportional decrease to 1 at max trx length (think a/x + b = backoff) - tl->transaction_length_backoff = (int)(1 / (100000000 * new) + 5); - // printf("thread %d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); - 
tl->linear_transaction_length_increment = new; + set_backoff(tl, new); } else if (tl->transaction_length_backoff == 0) { // backoff counter is zero, exponential increase up to 1 if (previous < 1) { @@ -53,11 +59,11 @@ return new; } -static void stm_transaction_length_handle_validation(stm_thread_local_t *tl, bool aborts) { +static inline void stm_transaction_length_handle_validation(stm_thread_local_t *tl, bool aborts) { tl->relative_transaction_length = get_new_transaction_length(tl, aborts); } -static uintptr_t stm_get_transaction_length(stm_thread_local_t *tl) { +static inline uintptr_t stm_get_transaction_length(stm_thread_local_t *tl) { double relative_additional_length = tl->relative_transaction_length; publish_custom_value_event( relative_additional_length, STM_SINGLE_THREAD_MODE_ADAPTIVE); diff --git a/c8/stm/nursery.h b/c8/stm/nursery.h --- a/c8/stm/nursery.h +++ b/c8/stm/nursery.h @@ -59,7 +59,7 @@ static uint32_t stm_max_conflicts; static uint32_t stm_global_conflicts; -static void stm_transaction_length_handle_validation(stm_thread_local_t *tl, bool aborts); -static uintptr_t stm_get_transaction_length(stm_thread_local_t *tl); +static inline void stm_transaction_length_handle_validation(stm_thread_local_t *tl, bool aborts); +static inline uintptr_t stm_get_transaction_length(stm_thread_local_t *tl); #endif From pypy.commits at gmail.com Mon Jul 17 05:23:17 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:17 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Improve backoff computation Message-ID: <596c8205.a29adf0a.2581e.fa96@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2121:de7da0f0b0ad Date: 2017-07-16 00:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/de7da0f0b0ad/ Log: Improve backoff computation diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -23,12 +23,12 @@ // corresponds to ~700 bytes nursery fill #define 
STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.00000001) -#define BACKOFF_MULTIPLIER (0.05 / STM_MIN_RELATIVE_TRANSACTION_LENGTH) +#define BACKOFF_MULTIPLIER (20 / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) static inline void set_backoff(stm_thread_local_t *tl, double rel_trx_len) { // the shorter the trx, the more backoff: 100 at min trx length, proportional decrease to 5 at max trx length (think a/x + b = backoff) tl->transaction_length_backoff = - (int)(1 / (BACKOFF_MULTIPLIER * rel_trx_len) + 5); + (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + 5); // printf("thread %d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); tl->linear_transaction_length_increment = rel_trx_len; } From pypy.commits at gmail.com Mon Jul 17 05:23:21 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:21 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Optimize min/max trx length Message-ID: <596c8209.01571c0a.a2bad.8618@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2123:d62e76b278bd Date: 2017-07-16 19:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/d62e76b278bd/ Log: Optimize min/max trx length diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -18,11 +18,11 @@ #define DEFAULT_FILL_MARK_NURSERY_BYTES (NURSERY_SIZE / 4) // #define LARGE_FILL_MARK_NURSERY_BYTES DEFAULT_FILL_MARK_NURSERY_BYTES -#define LARGE_FILL_MARK_NURSERY_BYTES 0x1000000000L +#define LARGE_FILL_MARK_NURSERY_BYTES 0x10000000L // #define LARGE_FILL_MARK_NURSERY_BYTES 0x1000000000000000L -// corresponds to ~700 bytes nursery fill -#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.00000001) +// corresponds to ~270 bytes nursery fill +#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000001) #define BACKOFF_MULTIPLIER (20 / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) static inline void set_backoff(stm_thread_local_t *tl, double rel_trx_len) { From pypy.commits at gmail.com Mon Jul 
17 05:23:27 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:27 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Update trx length on commit and abort only Message-ID: <596c820f.d685df0a.743d8.84a5@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2124:2f4291869a47 Date: 2017-07-10 16:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/2f4291869a47/ Log: Update trx length on commit and abort only diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -347,7 +347,6 @@ } if (thread_local_for_logging != NULL) { - stm_transaction_length_handle_validation(thread_local_for_logging, needs_abort); stop_timer_and_publish_for_thread( thread_local_for_logging, STM_DURATION_VALIDATION); } @@ -1378,6 +1377,8 @@ s_mutex_unlock(); + stm_transaction_length_handle_validation(thread_local_for_logging, false); + stop_timer_and_publish_for_thread( thread_local_for_logging, STM_DURATION_COMMIT_EXCEPT_GC); @@ -1549,6 +1550,8 @@ did_abort = 1; #endif + stm_transaction_length_handle_validation(pseg->pub.running_thread, true); + list_clear(pseg->objects_pointing_to_nursery); list_clear(pseg->old_objects_with_cards_set); LIST_FOREACH_R(pseg->large_overflow_objects, uintptr_t /*item*/, From pypy.commits at gmail.com Mon Jul 17 05:23:32 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:32 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Move transaction length update on abort Message-ID: <596c8214.b485df0a.e6b81.50c1@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2127:77cbbb3d1a97 Date: 2017-07-14 12:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/77cbbb3d1a97/ Log: Move transaction length update on abort diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1551,8 +1551,6 @@ did_abort = 1; #endif - 
stm_transaction_length_handle_validation(pseg->pub.running_thread, true); - list_clear(pseg->objects_pointing_to_nursery); list_clear(pseg->old_objects_with_cards_set); LIST_FOREACH_R(pseg->large_overflow_objects, uintptr_t /*item*/, @@ -1583,6 +1581,8 @@ tl->self_or_0_if_atomic = (intptr_t)tl; /* clear the 'atomic' flag */ STM_PSEGMENT->atomic_nesting_levels = 0; + stm_transaction_length_handle_validation(tl, true); + if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); if (tl->mem_reset_on_abort) { From pypy.commits at gmail.com Mon Jul 17 05:23:34 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:34 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Merge latest instrumentation fixes Message-ID: <596c8216.a3b0df0a.88bd.39b1@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2128:de8f34537c9b Date: 2017-07-14 17:50 +0200 http://bitbucket.org/pypy/stmgc/changeset/de8f34537c9b/ Log: Merge latest instrumentation fixes diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -468,8 +468,8 @@ #endif if (STM_PSEGMENT->last_commit_log_entry->next == INEV_RUNNING) { - pause_timer(); - wait_for_inevitable(); // TODO may abort!! 
timing event lost + stop_timer_and_publish(STM_DURATION_VALIDATION); + wait_for_inevitable(); continue_timer(); goto retry_from_start; /* redo _stm_validate() now */ } @@ -559,14 +559,13 @@ OPT_ASSERT(yes); release_modification_lock_wr(STM_SEGMENT->segment_num); + + stop_timer_and_publish(STM_DURATION_VALIDATION); } else { - pause_timer(); + stop_timer_and_publish(STM_DURATION_VALIDATION); _validate_and_attach(new); - continue_timer(); } - - stop_timer_and_publish(STM_DURATION_VALIDATION); } /* ############# STM ############# */ @@ -1327,7 +1326,9 @@ if there is an inevitable tx running) */ bool was_inev = STM_PSEGMENT->transaction_state == TS_INEVITABLE; - pause_timer(); + // publish here because the validation may abort + stop_timer_and_publish_for_thread( + thread_local_for_logging, STM_DURATION_COMMIT_EXCEPT_GC); _validate_and_add_to_commit_log(); continue_timer(); @@ -1656,9 +1657,9 @@ signal_commit_to_inevitable_transaction(); s_mutex_lock(); - if (any_soon_finished_or_inevitable_thread_segment() && - !safe_point_requested() && - num_waits <= NB_SEGMENTS) { + if (any_soon_finished_or_inevitable_thread_segment() + && !safe_point_requested() + && num_waits <= NB_SEGMENTS) { /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -774,16 +774,21 @@ static void major_do_validation_and_minor_collections(void) { + start_timer(); + int original_num = STM_SEGMENT->segment_num; long i; assert(_has_mutex()); /* including the sharing seg0 */ - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 0; i < NB_SEGMENTS; i++) { // TODO why is this strictly smaller than? 
ensure_gs_register(i); + pause_timer(); bool ok = _stm_validate(); + continue_timer(); + assert(get_priv_segment(i)->last_commit_log_entry->next == NULL || get_priv_segment(i)->last_commit_log_entry->next == INEV_RUNNING); if (!ok) { @@ -818,7 +823,9 @@ Collecting might fail due to invalid state. */ if (!must_abort()) { + pause_timer(); _do_minor_collection(/*commit=*/ false); + continue_timer(); assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); } else { @@ -828,6 +835,8 @@ } ensure_gs_register(original_num); + + stop_timer_and_publish(STM_DURATION_MAJOR_GC_FULL); } diff --git a/c8/stm/timing.h b/c8/stm/timing.h --- a/c8/stm/timing.h +++ b/c8/stm/timing.h @@ -27,6 +27,8 @@ #define pause_timer() clock_gettime(CLOCK_MONOTONIC_RAW, &stop); \ get_duration() +#define reset_timer() duration.tv_sec = 0; duration.tv_nsec = 0; + #define stm_duration_payload(duration_data) \ stm_timing_event_payload_data_t stm_duration_data = \ { .duration = &(duration_data) }; \ @@ -42,7 +44,8 @@ pause_timer() \ stm_duration_payload(duration) \ assert((thread_local) != NULL); \ - publish_event((thread_local), (event)) + publish_event((thread_local), (event)) \ + reset_timer() #define stop_timer_and_publish(event) \ stop_timer_and_publish_for_thread(STM_SEGMENT->running_thread, (event)) From pypy.commits at gmail.com Mon Jul 17 05:23:29 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:29 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Merge commit signalling fix Message-ID: <596c8211.8c99df0a.16f83.9764@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2125:c1b97c862a98 Date: 2017-07-13 21:43 +0200 http://bitbucket.org/pypy/stmgc/changeset/c1b97c862a98/ Log: Merge commit signalling fix diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1178,6 +1178,7 @@ _do_start_transaction(tl); continue_timer(); + STM_PSEGMENT->commit_if_not_atomic = false; STM_SEGMENT->nursery_mark 
= ((stm_char *)_stm_nursery_start + stm_get_transaction_length(tl)); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -542,8 +542,9 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; + assert((pseg->transaction_state == TS_INEVITABLE) || !pseg->commit_if_not_atomic) if (pseg->commit_if_not_atomic - // && pseg->transaction_state == TS_INEVITABLE // TODO why does this break the mechanism? + && pseg->transaction_state == TS_INEVITABLE && pseg->pub.running_thread->self_or_0_if_atomic != 0) { // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately pseg->pub.nursery_mark = 0; From pypy.commits at gmail.com Mon Jul 17 05:23:31 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 17 Jul 2017 02:23:31 -0700 (PDT) Subject: [pypy-commit] stmgc c8-adaptive-trx-length-per-thread: Merge fix for syntax error Message-ID: <596c8213.0d421c0a.22862.bd75@mx.google.com> Author: Tobias Weber Branch: c8-adaptive-trx-length-per-thread Changeset: r2126:ff769635407d Date: 2017-07-13 22:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/ff769635407d/ Log: Merge fix for syntax error diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -542,7 +542,7 @@ pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; pseg->pub.nursery_mark -= nursery_used; - assert((pseg->transaction_state == TS_INEVITABLE) || !pseg->commit_if_not_atomic) + assert((pseg->transaction_state == TS_INEVITABLE) || !pseg->commit_if_not_atomic); if (pseg->commit_if_not_atomic && pseg->transaction_state == TS_INEVITABLE && pseg->pub.running_thread->self_or_0_if_atomic != 0) { From pypy.commits at gmail.com Mon Jul 17 09:11:29 2017 From: pypy.commits at gmail.com (fijal) Date: Mon, 17 Jul 2017 06:11:29 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add the pdf too Message-ID: 
<596cb781.07bf1c0a.5d00.c0db@mx.google.com> Author: fijal Branch: extradoc Changeset: r5822:3af5b24e411a Date: 2017-07-17 16:11 +0300 http://bitbucket.org/pypy/extradoc/changeset/3af5b24e411a/ Log: add the pdf too diff --git a/talk/pyconru-2017/talk.key b/talk/pyconru-2017/talk.key index ce8110f8dee6bb6dfffe662711c6ad55d6f5b248..37836b52ca7b98248f5fc989e1473f21fdda89ad GIT binary patch [cut] diff --git a/talk/pyconru-2017/talk.pdf b/talk/pyconru-2017/talk.pdf new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..d236e50aab87ff12e4c606fbbcca652d8bbf2298 GIT binary patch [cut] From pypy.commits at gmail.com Mon Jul 17 14:47:50 2017 From: pypy.commits at gmail.com (mjacob) Date: Mon, 17 Jul 2017 11:47:50 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: hg merge py3.5 Message-ID: <596d0656.4e921c0a.2f60e.2e1e@mx.google.com> Author: Manuel Jacob Branch: py3.6 Changeset: r91914:73c89dbe9896 Date: 2017-07-17 20:47 +0200 http://bitbucket.org/pypy/pypy/changeset/73c89dbe9896/ Log: hg merge py3.5 diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -748,14 +748,17 @@ if space.isinstance_w(w_source, space.w_unicode): raise oefmt(space.w_TypeError, "cannot convert a (unicode) str object to bytes") + return _from_byte_sequence(space, w_source) - # sequence of bytes + +def _from_byte_sequence(space, w_source): + # Split off in a separate function for the JIT's benefit w_result = space.appexec([w_source], """(seq): result = bytearray() for i in seq: result.append(i) return result""") - return w_result.getdata() + return ''.join(w_result.getdata()) W_BytesObject.typedef = TypeDef( "bytes", None, None, "read", From pypy.commits at gmail.com Tue Jul 18 04:27:54 2017 From: pypy.commits at gmail.com (Dodan) Date: Tue, 18 Jul 2017 01:27:54 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-sendmsg-recvmsg: Sendmsg and recvmsg implemented. Some test fail. 
Some mem leaks Message-ID: <596dc68a.d6bd1c0a.306c4.b2b1@mx.google.com> Author: Dodan Mihai Branch: py3.5-sendmsg-recvmsg Changeset: r91915:4396f9d022d5 Date: 2017-07-18 11:25 +0300 http://bitbucket.org/pypy/pypy/changeset/4396f9d022d5/ Log: Sendmsg and recvmsg implemented. Some test fail. Some mem leaks diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -34,6 +34,7 @@ ntohs ntohl htons htonl inet_aton inet_ntoa inet_pton inet_ntop getaddrinfo getnameinfo getdefaulttimeout setdefaulttimeout + CMSG_SPACE CMSG_LEN """.split(): if (name in ('inet_pton', 'inet_ntop', 'socketpair') and diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -327,6 +327,28 @@ for (family, socktype, protocol, canonname, addr) in lst] return space.newlist(lst1) + at unwrap_spec(size=int) +def CMSG_SPACE(space, size): + if size < 0: + raise oefmt(space.w_OverflowError, + "CMSG_SPACE() argument out of range") + retval = rsocket.CMSG_SPACE(size) + if retval == 0: + raise oefmt(space.w_OverflowError, + "CMSG_SPACE() argument out of range") + return space.newint(retval) + + at unwrap_spec(len=int) +def CMSG_LEN(space, len): + if len < 0: + raise oefmt(space.w_OverflowError, + "CMSG_LEN() argument out of range") + retval = rsocket.CMSG_LEN(len) + if retval == 0: + raise oefmt(space.w_OverflowError, + "CMSG_LEN() argument out of range") + return space.newint(retval) + def getdefaulttimeout(space): """getdefaulttimeout() -> timeout diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -285,6 +285,8 @@ except SocketError as e: raise converted_error(space, e) + + def close_w(self, space): """close() @@ -446,6 +448,38 @@ converted_error(space, e, 
eintr_retry=True) return space.newtuple([space.newbytes(data), w_addr]) + @unwrap_spec(message_size=int, ancbufsize=int, flags=int) + def recvmsg_w(self,space,message_size, ancbufsize = 0, flags = 0): + if (message_size < 0): + raise oefmt(space.w_ValueError, "negative buffer size in recvmsg()") + if ancbufsize < 0: + raise oefmt(space.w_ValueError, "invalid ancillary data buffer length") + try: + tuple = self.sock.recvmsg(message_size, ancbufsize, flags) + message = space.newbytes(tuple[0]) + # print(tuple[0]) + list = [] + for l in tuple[1]: + tup = space.newtuple([space.newint(l[0]), space.newint(l[1]), space.newbytes(l[2])]) + list.append(tup) + + anc = space.newlist(list) + + flag = space.newint(tuple[2]) + if (tuple[3] is not None): + address = addr_as_object(tuple[3], self.sock.fd, space) + else: + address = space.w_None + + rettup = space.newtuple([message, anc, flag, address]) + return rettup + except SocketError as e: + converted_error(space, e, eintr_retry=True) + + + + + @unwrap_spec(data='bufferstr', flags=int) def send_w(self, space, data, flags=0): """send(data[, flags]) -> count @@ -501,6 +535,110 @@ converted_error(space, e, eintr_retry=True) return space.newint(count) + #@unwrap_spec(data='bufferstr', flags = int) + def sendmsg_w(self, space, w_data, w_ancillary=None, w_flags=None ,w_address=None): + """sendmsg(messages, [ancillaries, [flags, [address]]]) + """ + flags = 0 + if space.is_none(w_flags) is False: + flags = space.int_w(w_flags) + + address = None + if space.is_none(w_address) is False: + address = self.addr_from_object(space, w_address) + + data = [] + if (w_data.typedef.name == 'list'): + for i in w_data.getitems(): + if space.isinstance_w(i,space.w_bytes): + data.append(space.bytes_w(i)) + else: + if (i.typedef.name == 'array.array'): + data.append(space.bytes_w(i.descr_tobytes(space))) + else: + raise oefmt(space.w_TypeError, "a bytes-like object is required") + else: + while True: + try: + if (space.is_generator(w_data) is False): 
+ raise oefmt(space.w_TypeError, "sendmsg(): argument 1 must be iterable") + i = space.next(w_data) + if space.isinstance_w(i, space.w_bytes): + data.append(space.bytes_w(i)) + else: + if (i.typedef.name == 'array.array'): + data.append(space.bytes_w(i.descr_tobytes(space))) + else: + raise oefmt(space.w_TypeError, "a bytes-like object is required") + except OperationError as e: + if not e.match(space,space.w_StopIteration): + raise + break + ancillary = [] + + if w_ancillary is not None: + if (space.isinstance_w(w_ancillary,space.w_list)): + for i in w_ancillary.getitems(): + if (space.isinstance_w(i, space.w_tuple) is False): + raise oefmt(space.w_TypeError,"[sendmsg() ancillary data items]() argument must be sequence") + if (space.len_w(i) == 3): + level = space.int_w(space.getitem(i, space.newint(0))) + type = space.int_w(space.getitem(i, space.newint(1))) + if (space.getitem(i, space.newint(2)).typedef.name == 'array.array'): + cont = space.bytes_w(space.getitem(i, space.newint(2)).descr_tobytes(space)) + else: + if (space.isinstance_w(space.getitem(i, space.newint(2)), space.w_bytes)): + cont = space.bytes_w(space.getitem(i, space.newint(2))) + else: + raise oefmt(space.w_TypeError,"a bytes-like object is required") + tup = (level, type, cont) + ancillary.append(tup) + else: + raise oefmt(space.w_TypeError,"[sendmsg() ancillary data items]() argument must be sequence of length 3") + + else: + while True: + try: + if (space.is_generator(w_ancillary) is False): + raise oefmt(space.w_TypeError,"[sendmsg() ancillary data items]() argument must be sequence") + i = space.next(w_ancillary) + if (space.isinstance_w(i, space.w_tuple) is False): + raise oefmt(space.w_TypeError, + "[sendmsg() ancillary data items]() argument must be sequence of length 3") + if (space.len_w(i) != 3): + raise oefmt(space.w_TypeError,"[sendmsg() ancillary data items]() argument must be sequence of length 3") + except OperationError as e: + if not e.match(space,space.w_StopIteration): + 
raise + break + level = space.int_w(space.getitem(i, space.newint(0))) + type = space.int_w(space.getitem(i, space.newint(1))) + if (space.getitem(i, space.newint(2)).typedef.name == 'array.array'): + cont = space.bytes_w(space.getitem(i, space.newint(2)).descr_tobytes(space)) + else: + if (space.isinstance_w(space.getitem(i, space.newint(2)), space.w_bytes)): + cont = space.bytes_w(space.getitem(i, space.newint(2))) + else: + raise oefmt(space.w_TypeError, "a bytes-like object is required") + tup = (level, type, cont) + ancillary.append(tup) + + try: + count = self.sock.sendmsg(data, ancillary, flags, address) + if count < 0: + if (count == -1000): + raise oefmt(space.w_OSError, "sending multiple control messages not supported") + if (count == -1001): + raise oefmt(space.w_OSError, "ancillary data item too large") + if (count == -1002): + raise oefmt(space.w_OSError, "too much ancillary data") + + return space.newint(count) + except SocketError as e: + converted_error(space, e, eintr_retry=True) + + + @unwrap_spec(flag=int) def setblocking_w(self, flag): """setblocking(flag) @@ -772,7 +910,7 @@ socketmethodnames = """ _accept bind close connect connect_ex fileno detach getpeername getsockname getsockopt gettimeout listen -recv recvfrom send sendall sendto setblocking +recv recvfrom recvmsg send sendall sendto sendmsg setblocking setsockopt settimeout shutdown _reuse _drop recv_into recvfrom_into """.split() if hasattr(rsocket._c, 'WSAIoctl'): @@ -813,6 +951,8 @@ sendall(data[, flags]) -- send all data send(data[, flags]) -- send data, may not send all of it sendto(data[, flags], addr) -- send data to a given address +sendmsg(messages[, ancillary[, flags[, address]]]) -- send data and ancillary payload in a packet. May specifiy flags or the address +recvmsg(message_size,[ ancillary_size,[ flags]]) -- receive data and ancillary payload. 
Return a tup of message, ancdata, flags and address setblocking(0 | 1) -- set or clear the blocking I/O flag setsockopt(level, optname, value) -- set socket options settimeout(None | float) -- set or clear the timeout diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.tool import rffi_platform as platform from rpython.rtyper.lltypesystem.rffi import CCHARP +from rpython.rlib import jit from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform as target_platform @@ -190,6 +191,8 @@ IPX_TYPE +SCM_RIGHTS + POLLIN POLLPRI POLLOUT POLLERR POLLHUP POLLNVAL POLLRDNORM POLLRDBAND POLLWRNORM POLLWEBAND POLLMSG @@ -260,6 +263,7 @@ sockaddr_ptr = lltype.Ptr(lltype.ForwardReference()) addrinfo_ptr = lltype.Ptr(lltype.ForwardReference()) + # struct types CConfig.sockaddr = platform.Struct('struct sockaddr', [('sa_family', rffi.INT), @@ -343,6 +347,675 @@ [('ifr_ifindex', rffi.INT), ('ifr_name', rffi.CFixedArray(rffi.CHAR, 8))]) +# insert handler for sendmsg / recvmsg here +if _POSIX: + includes = ['stddef.h', + 'sys/socket.h', + 'unistd.h', + 'string.h', + 'stdlib.h', + 'errno.h', + 'limits.h', + 'stdio.h', + 'sys/types.h'] + separate_module_sources = [''' + + //defines for recvmsg + #define SUCCESS 0 + #define BAD_MSG_SIZE_GIVEN -1 + #define BAD_ANC_SIZE_GIVEN -2 + #define WOULD_BLOCK -3 + #define AGAIN -4 + #define BADDESC -5 + #define CON_REF -6 + #define FAULT -7 + #define INTR -8 + #define NOMEM -9 + #define NOTCONN -10 + #define NOTSOCK -11 + #define MAL_ANC -12 + + //defines for sendmsg + #define MUL_MSGS_NOT_SUP -1000 + #define ANC_DATA_TOO_LARGE -1001 + #define ANC_DATA_TOO_LARGEX -1002 + + #define MSG_IOVLEN 1 // CPyhton has hardcoded this as well. 
+ #if INT_MAX > 0x7fffffff + #define SOCKLEN_T_LIMIT 0x7fffffff + #else + #define SOCKLEN_T_LIMIT INT_MAX + #endif + + + #ifdef CMSG_SPACE + static int + cmsg_min_space(struct msghdr *msg, struct cmsghdr *cmsgh, size_t space) + { + size_t cmsg_offset; + static const size_t cmsg_len_end = (offsetof(struct cmsghdr, cmsg_len) + + sizeof(cmsgh->cmsg_len)); + + /* Note that POSIX allows msg_controllen to be of signed type. */ + if (cmsgh == NULL || msg->msg_control == NULL) + return 0; + /* Note that POSIX allows msg_controllen to be of a signed type. This is + annoying under OS X as it's unsigned there and so it triggers a + tautological comparison warning under Clang when compared against 0. + Since the check is valid on other platforms, silence the warning under + Clang. */ + #ifdef __clang__ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wtautological-compare" + #endif + #if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wtype-limits" + #endif + if (msg->msg_controllen < 0) + return 0; + #if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))) + #pragma GCC diagnostic pop + #endif + #ifdef __clang__ + #pragma clang diagnostic pop + #endif + if (space < cmsg_len_end) + space = cmsg_len_end; + cmsg_offset = (char *)cmsgh - (char *)msg->msg_control; + return (cmsg_offset <= (size_t)-1 - space && + cmsg_offset + space <= msg->msg_controllen); + } + #endif + + #ifdef CMSG_LEN + + /* If pointer CMSG_DATA(cmsgh) is in buffer msg->msg_control, set + *space to number of bytes following it in the buffer and return + true; otherwise, return false. Assumes cmsgh, msg->msg_control and + msg->msg_controllen are valid. 
*/ + static int + get_cmsg_data_space(struct msghdr *msg, struct cmsghdr *cmsgh, size_t *space) + { + size_t data_offset; + char *data_ptr; + + if ((data_ptr = (char *)CMSG_DATA(cmsgh)) == NULL) + return 0; + data_offset = data_ptr - (char *)msg->msg_control; + if (data_offset > msg->msg_controllen) + return 0; + *space = msg->msg_controllen - data_offset; + return 1; + } + + /* If cmsgh is invalid or not contained in the buffer pointed to by + msg->msg_control, return -1. If cmsgh is valid and its associated + data is entirely contained in the buffer, set *data_len to the + length of the associated data and return 0. If only part of the + associated data is contained in the buffer but cmsgh is otherwise + valid, set *data_len to the length contained in the buffer and + return 1. */ + static int + get_cmsg_data_len(struct msghdr *msg, struct cmsghdr *cmsgh, size_t *data_len) + { + size_t space, cmsg_data_len; + + if (!cmsg_min_space(msg, cmsgh, CMSG_LEN(0)) || + cmsgh->cmsg_len < CMSG_LEN(0)) + return -1; + cmsg_data_len = cmsgh->cmsg_len - CMSG_LEN(0); + if (!get_cmsg_data_space(msg, cmsgh, &space)) + return -1; + if (space >= cmsg_data_len) { + *data_len = cmsg_data_len; + return 0; + } + *data_len = space; + return 1; + } + #endif /* CMSG_LEN */ + + struct recvmsg_info + { + int error_code; + struct sockaddr* address; + socklen_t addrlen; + int* length_of_messages; + char** messages; + int no_of_messages; + int size_of_ancillary; + int* levels; + int* types; + char** file_descr; + int* descr_per_ancillary; + int flags; + }; + + + RPY_EXTERN + int recvmsg_implementation( + int socket_fd, + int message_size, + int ancillary_size, + int flags, + struct sockaddr* address, + socklen_t* addrlen, + int** length_of_messages, + char** messages, + int* no_of_messages, + int* size_of_ancillary, + int** levels, + int** types, + char** file_descr, + int** descr_per_ancillary, + int* flag) + + { + + struct sockaddr* recvd_address; + socklen_t recvd_addrlen; + struct msghdr 
msg = {0}; + void *controlbuf = NULL; + struct cmsghdr *cmsgh; + int cmsg_status; + struct iovec iov; + struct recvmsg_info* retinfo; + int error_flag; + int cmsgdatalen = 0; + + //allocation flags for failure + int iov_alloc = 0; + int anc_alloc = 0; + + retinfo = (struct recvmsg_info*) malloc(sizeof(struct recvmsg_info)); + /* + if (message_size < 0){ + error_flag = BAD_MSG_SIZE_GIVEN; + goto fail; + } + */ + if (ancillary_size > SOCKLEN_T_LIMIT){ + error_flag = BAD_ANC_SIZE_GIVEN; + goto fail; + } + + + iov.iov_base = (char*) malloc(message_size); + memset(iov.iov_base, 0, message_size); + iov.iov_len = message_size; + controlbuf = malloc(ancillary_size); + recvd_addrlen = sizeof(struct sockaddr); + recvd_address = (struct sockaddr*) malloc(recvd_addrlen); + + memset(recvd_address, 0,recvd_addrlen); + + msg.msg_name = recvd_address; + msg.msg_namelen = recvd_addrlen; + msg.msg_iov = &iov; + msg.msg_iovlen = MSG_IOVLEN; + msg.msg_control = controlbuf; + msg.msg_controllen = ancillary_size; + + retinfo->address = msg.msg_name; + retinfo->length_of_messages = (int*) malloc (MSG_IOVLEN * sizeof(int)); + retinfo->no_of_messages = 1; + retinfo->messages = (char**) malloc (MSG_IOVLEN * sizeof(char*)); + retinfo->messages[0] = msg.msg_iov->iov_base; + + iov_alloc = 1; + + ssize_t bytes_recvd = 0; + + bytes_recvd = recvmsg(socket_fd, &msg, flags); + + if (bytes_recvd < 0){ + switch (errno){ + case EAGAIN: + error_flag = -3; + break; + case EBADF: + error_flag = -5; + break; + case ECONNREFUSED: + error_flag = -6; + break; + case EFAULT: + error_flag = -7; + break; + case EINTR: + error_flag = -8; + break; + case ENOMEM: + error_flag = -9; + break; + case ENOTCONN: + error_flag = -10; + break; + case ENOTSOCK: + error_flag = -11; + break; + } + + goto fail; + } + + retinfo->addrlen = (socklen_t) msg.msg_namelen; + retinfo->length_of_messages[0] = msg.msg_iov->iov_len; + + + int anc_counter = 0; + /* + struct recv_list* first_item = (struct recv_list*) malloc(sizeof(struct 
recv_list)); + struct recv_list* iter = first_item; + */ + for (cmsgh = ((msg.msg_controllen > 0) ? CMSG_FIRSTHDR(&msg) : NULL); + cmsgh != NULL; cmsgh = CMSG_NXTHDR(&msg, cmsgh)) { + + anc_counter++; + } + + retinfo->size_of_ancillary = anc_counter; + retinfo->file_descr = (char**) malloc (anc_counter * sizeof(char*)); + retinfo->levels = (int*) malloc(anc_counter * sizeof(int)); + retinfo->types = (int*) malloc(anc_counter * sizeof(int)); + retinfo->descr_per_ancillary = (int*) malloc(anc_counter * sizeof(int)); + anc_alloc = 1; + + int i=0; + for (cmsgh = ((msg.msg_controllen > 0) ? CMSG_FIRSTHDR(&msg) : NULL); + cmsgh != NULL; cmsgh = CMSG_NXTHDR(&msg, cmsgh)) { + size_t local_size = 0; + cmsg_status = get_cmsg_data_len(&msg, cmsgh, &local_size); + if (cmsg_status !=0 ){ + error_flag = MAL_ANC; + goto err_closefds; + } + retinfo->file_descr[i] = (char*) malloc(local_size); + memcpy(retinfo->file_descr[i], CMSG_DATA(cmsgh), local_size); + retinfo->levels[i] = cmsgh->cmsg_level; + retinfo->types[i] = cmsgh->cmsg_type; + retinfo->descr_per_ancillary[i] =local_size; + i++; + + } + retinfo->flags = msg.msg_flags; + retinfo->error_code = 0; + + //address = (struct sockaddr*) malloc (sizeof(struct sockaddr)); + memcpy(address,retinfo->address,sizeof(struct sockaddr)); + + + *addrlen = retinfo->addrlen; + *no_of_messages = retinfo->no_of_messages; + *size_of_ancillary = retinfo->size_of_ancillary; + + *length_of_messages = (int*) malloc (sizeof(int) * retinfo->no_of_messages); + //*length_of_messages = + memcpy(*length_of_messages, retinfo->length_of_messages, sizeof(int) * retinfo->no_of_messages); + + int counter = 0; + for (i=0; i< retinfo->no_of_messages; i++) + counter += retinfo->length_of_messages[i]; + + //*messages = (char*) malloc(sizeof(char) * counter); + memset(*messages, 0, sizeof(char) * counter); + counter = 0; + for(i=0; i< retinfo->no_of_messages; i++){ + memcpy(*messages+counter,retinfo->messages[i],retinfo->length_of_messages[i]); + counter += 
retinfo->length_of_messages[i]; + } + + *levels = (int*) malloc (sizeof(int) * retinfo->size_of_ancillary); + //*levels = + memcpy(*levels, retinfo->levels, sizeof(int) * retinfo->size_of_ancillary); + *types = (int*) malloc (sizeof(int) * retinfo->size_of_ancillary); + //*types = + memcpy(*types, retinfo->types, sizeof(int) * retinfo->size_of_ancillary); + *descr_per_ancillary = (int*) malloc (sizeof(int) * retinfo->size_of_ancillary); + //*descr_per_ancillary = + memcpy(*descr_per_ancillary, retinfo->descr_per_ancillary, sizeof(int) * retinfo->size_of_ancillary); + + counter = 0; + for (i=0; i < retinfo->size_of_ancillary; i++) + counter += retinfo->descr_per_ancillary[i]; + + *file_descr = (char*) malloc (sizeof(char) * counter); + memset(*file_descr, 0, sizeof(char) * counter); + counter = 0; + for (i=0; isize_of_ancillary; i++){ + memcpy(*file_descr+counter,retinfo->file_descr[i], retinfo->descr_per_ancillary[i]); + counter += retinfo->descr_per_ancillary[i]; + } + + *flag = retinfo->flags; + //int k; + //char* dsadas; + //dsadas = (char*) (*file_descr[0]); + //for (k=0; kno_of_messages * sizeof(int); k++) + // printf("0x%X ", dsadas[k]); + + free(retinfo->address); + free(retinfo->length_of_messages); + free(retinfo->levels); + free(retinfo->types); + free(retinfo->descr_per_ancillary); + for(i = 0; ino_of_messages; i++) + free(retinfo->messages[i]); + for (i = 0; i < retinfo->size_of_ancillary; i++) + free(retinfo->file_descr[i]); + free(retinfo->file_descr); + free(retinfo->messages); + free(retinfo); + free(controlbuf); + + return bytes_recvd; + + fail: + if (anc_alloc){ + free(retinfo->file_descr); + free(retinfo->levels); + free(retinfo->types); + free(retinfo->descr_per_ancillary); + free(retinfo->length_of_messages); + free(retinfo->messages[0]); + free(retinfo->messages); + free(retinfo->address); + free(controlbuf); + file_descr = NULL; + levels = NULL; + types = NULL; + descr_per_ancillary = NULL; + length_of_messages = NULL; + messages =NULL; + 
address = NULL; + addrlen = NULL; + no_of_messages = NULL; + size_of_ancillary = NULL; + + }else{ + if (iov_alloc){ + free(retinfo->length_of_messages); + free(retinfo->messages[0]); + free(retinfo->messages); + free(retinfo->address); + free(controlbuf); + length_of_messages = NULL; + messages =NULL; + address = NULL; + file_descr = NULL; + levels = NULL; + types = NULL; + descr_per_ancillary = NULL; + addrlen = NULL; + no_of_messages = NULL; + size_of_ancillary = NULL; + + } + } + return error_flag; + + err_closefds: + #ifdef SCM_RIGHTS + /* Close all descriptors coming from SCM_RIGHTS, so they don't leak. */ + for (cmsgh = ((msg.msg_controllen > 0) ? CMSG_FIRSTHDR(&msg) : NULL); + cmsgh != NULL; cmsgh = CMSG_NXTHDR(&msg, cmsgh)) { + size_t dataleng; + cmsg_status = get_cmsg_data_len(&msg, cmsgh, &dataleng); + cmsgdatalen = (int) dataleng; + if (cmsg_status < 0) + break; + if (cmsgh->cmsg_level == SOL_SOCKET && + cmsgh->cmsg_type == SCM_RIGHTS) { + size_t numfds; + int *fdp; + + numfds = cmsgdatalen / sizeof(int); + fdp = (int *)CMSG_DATA(cmsgh); + while (numfds-- > 0) + close(*fdp++); + } + if (cmsg_status != 0) + break; + } + #endif /* SCM_RIGHTS */ + goto fail; + } + + + //################################################################################################ + //send goes from here + + #ifdef CMSG_LEN + static int + get_CMSG_LEN(size_t length, size_t *result) + { + size_t tmp; + + if (length > (SOCKLEN_T_LIMIT - CMSG_LEN(0))) + return 0; + tmp = CMSG_LEN(length); + if ((tmp > SOCKLEN_T_LIMIT) || (tmp < length)) + return 0; + *result = tmp; + return 1; + } + #endif + + #ifdef CMSG_SPACE + /* If length is in range, set *result to CMSG_SPACE(length) and return + true; otherwise, return false. */ + static int + get_CMSG_SPACE(size_t length, size_t *result) + { + size_t tmp; + + /* Use CMSG_SPACE(1) here in order to take account of the padding + necessary before *and* after the data. 
*/ + if (length > (SOCKLEN_T_LIMIT - CMSG_SPACE(1))) + return 0; + tmp = CMSG_SPACE(length); + if ((tmp > SOCKLEN_T_LIMIT) || (tmp < length)) + return 0; + *result = tmp; + return 1; + } + #endif + + RPY_EXTERN + int sendmsg_implementation(int socket, struct sockaddr* address, socklen_t addrlen, long* length_of_messages, char** messages, int no_of_messages, long* levels, long* types, char** file_descriptors, long* no_of_fds, int control_length, int flag ) + { + + struct msghdr msg = {0}; + struct cmsghdr *cmsg; + void* controlbuf = NULL; + int retval; + size_t i; + + // Add the address + + if (address != NULL) { + msg.msg_name = address; + msg.msg_namelen = addrlen; + } + // Add the message + struct iovec *iovs = NULL; + + if (no_of_messages > 0){ + + iovs = (struct iovec*) malloc(no_of_messages * sizeof(struct iovec)); + memset(iovs, 0, no_of_messages * sizeof(struct iovec)); + msg.msg_iov = iovs; + msg.msg_iovlen = no_of_messages; + + for (i=0; i< no_of_messages; i++){ + iovs[i].iov_base = messages[i]; + iovs[i].iov_len = length_of_messages[i]; + } + } + // Add the ancillary + + #ifndef CMSG_SPACE + if (control_length > 1){ + free(iovs); + return MUL_MSGS_NOT_SUP; + } + #endif + if (control_length > 0){ + //compute the total size of the ancillary + size_t total_size_of_ancillary = 0; + size_t space; + size_t controllen = 0, controllen_last = 0; + for (i = 0; i< control_length; i++){ + total_size_of_ancillary = no_of_fds[i]; + #ifdef CMSG_SPACE + if (!get_CMSG_SPACE(total_size_of_ancillary, &space)) { + #else + if (!get_CMSG_LEN(total_size_of_ancillary, &space)) { + #endif + if (iovs != NULL) + free(iovs); + return ANC_DATA_TOO_LARGE; + } + controllen +=space; + if ((controllen > SOCKLEN_T_LIMIT) || (controllen < controllen_last)) { + if (iovs != NULL) + free(iovs); + return ANC_DATA_TOO_LARGEX; + } + controllen_last = controllen; + + } + + controlbuf = malloc(controllen); //* sizeof(int) + + msg.msg_control= controlbuf; + msg.msg_controllen = controllen; + + 
memset(controlbuf, 0, controllen); + + cmsg = NULL; + for (i = 0; i< control_length; i++){ + cmsg = (i == 0) ? CMSG_FIRSTHDR(&msg) : CMSG_NXTHDR(&msg, cmsg); + + cmsg->cmsg_level = (int) levels[i]; + cmsg->cmsg_type = (int) types[i]; + cmsg->cmsg_len = CMSG_LEN(sizeof(char) * no_of_fds[i]); + memcpy(CMSG_DATA(cmsg), file_descriptors[i], sizeof(char) * no_of_fds[i]); + } + + + } + // Add the flags + msg.msg_flags = flag; + + // Send the data + retval = sendmsg(socket, &msg, flag); + + if (iovs != NULL) + free(iovs); + if (controlbuf !=NULL) + free(controlbuf); + + return retval; + } + #ifdef CMSG_SPACE + RPY_EXTERN + size_t CMSG_SPACE_wrapper(size_t desired_space){ + size_t result; + if (!get_CMSG_SPACE(desired_space, &result)){ + return 0; + } + return result; + } + #endif + + #ifdef CMSG_LEN + + RPY_EXTERN + size_t CMSG_LEN_wrapper(size_t desired_len){ + size_t result; + if (!get_CMSG_LEN(desired_len, &result)){ + return 0; + } + return result; + } + #endif + + RPY_EXTERN + char* memcpy_from_CCHARP_at_offset_and_size(char* string, int offset, int size){ + char* buffer; + buffer = (char*)malloc(sizeof(char)*size); + buffer = memcpy(buffer, string + offset, size); + return buffer; + } + + RPY_EXTERN + int free_pointer_to_signedp(int** ptrtofree){ + free(*ptrtofree); + return 0; + } + + RPY_EXTERN + int free_ptr_to_charp(char** ptrtofree){ + free(*ptrtofree); + return 0; + } + + ''',] + + post_include_bits =[ "RPY_EXTERN " + "int sendmsg_implementation(int socket, struct sockaddr* address, socklen_t addrlen, long* length_of_messages, char** messages, int no_of_messages, long* levels, long* types, char** file_descriptors, long* no_of_fds, int control_length, int flag );\n" + "RPY_EXTERN " + "int recvmsg_implementation(int socket_fd, int message_size, int ancillary_size, int flags, struct sockaddr* address, socklen_t* addrlen, int** length_of_messages, char** messages, int* no_of_messages, int* size_of_ancillary, int** levels, int** types, char** file_descr, int** 
descr_per_ancillary, int* flag);\n" + "static " + "int cmsg_min_space(struct msghdr *msg, struct cmsghdr *cmsgh, size_t space);\n" + "static " + "int get_cmsg_data_space(struct msghdr *msg, struct cmsghdr *cmsgh, size_t *space);\n" + "static " + "int get_cmsg_data_len(struct msghdr *msg, struct cmsghdr *cmsgh, size_t *data_len);\n" + "static " + "int get_CMSG_LEN(size_t length, size_t *result);\n" + "static " + "int get_CMSG_SPACE(size_t length, size_t *result);\n" + "RPY_EXTERN " + "size_t CMSG_LEN_wrapper(size_t desired_len);\n" + "RPY_EXTERN " + "size_t CMSG_SPACE_wrapper(size_t desired_space);\n" + "RPY_EXTERN " + "char* memcpy_from_CCHARP_at_offset_and_size(char* string, int offset, int size);\n" + "RPY_EXTERN " + "int free_pointer_to_signedp(int** ptrtofree);\n" + "RPY_EXTERN " + "int free_ptr_to_charp(char** ptrtofree);\n" + ] + + #CConfig.SignedPP = lltype.Ptr(lltype.Array(rffi.SIGNEDP, hints={'nolength': True})) + + + # CConfig.recvmsginfo = platform.Struct('struct recvmsg_info', + # [('error_code',rffi.SIGNED), + # ('address',sockaddr_ptr), + # ('addrlen',socklen_t_ptr), + # ('length_of_messages', rffi.SIGNEDP), + # ('messages',rffi.CCHARPP), + # ('no_of_messages',rffi.INT), + # ('size_of_ancillary',rffi.INT), + # ('levels', rffi.SIGNEDP), + # ('types', rffi.SIGNEDP), + # ('file_descr', rffi.CCHARPP), + # ('descr_per_ancillary', rffi.SIGNEDP), + # ('flags', rffi.INT), + # ]) + + # + + compilation_info = ExternalCompilationInfo( + includes=includes, + separate_module_sources=separate_module_sources, + post_include_bits=post_include_bits, + ) + if _WIN32: CConfig.WSAEVENT = platform.SimpleType('WSAEVENT', rffi.VOIDP) CConfig.WSANETWORKEVENTS = platform.Struct( @@ -387,6 +1060,7 @@ sockaddr_ptr.TO.become(cConfig.sockaddr) addrinfo_ptr.TO.become(cConfig.addrinfo) + # fill in missing constants with reasonable defaults cConfig.NI_MAXHOST = cConfig.NI_MAXHOST or 1025 cConfig.NI_MAXSERV = cConfig.NI_MAXSERV or 32 @@ -571,11 +1245,32 @@ recvfrom = 
external('recvfrom', [socketfd_type, rffi.VOIDP, size_t, rffi.INT, sockaddr_ptr, socklen_t_ptr], rffi.INT, save_err=SAVE_ERR) +recvmsg = jit.dont_look_inside(rffi.llexternal("recvmsg_implementation", + [rffi.INT, rffi.INT, rffi.INT, rffi.INT,sockaddr_ptr, socklen_t_ptr, rffi.SIGNEDPP, rffi.CCHARPP, + rffi.SIGNEDP,rffi.SIGNEDP, rffi.SIGNEDPP, rffi.SIGNEDPP, rffi.CCHARPP, rffi.SIGNEDPP, rffi.SIGNEDP], + rffi.INT, save_err=SAVE_ERR, + compilation_info=compilation_info)) + +memcpy_from_CCHARP_at_offset = jit.dont_look_inside(rffi.llexternal("memcpy_from_CCHARP_at_offset_and_size", + [rffi.CCHARP,rffi.INT,rffi.INT],rffi.CCHARP,save_err=SAVE_ERR,compilation_info=compilation_info)) +freeccharp = jit.dont_look_inside(rffi.llexternal("free_ptr_to_charp", + [rffi.CCHARPP],rffi.INT,save_err=SAVE_ERR,compilation_info=compilation_info)) +freesignedp = jit.dont_look_inside(rffi.llexternal("free_pointer_to_signedp", + [rffi.SIGNEDPP],rffi.INT,save_err=SAVE_ERR,compilation_info=compilation_info)) + send = external('send', [socketfd_type, rffi.CCHARP, size_t, rffi.INT], ssize_t, save_err=SAVE_ERR) sendto = external('sendto', [socketfd_type, rffi.VOIDP, size_t, rffi.INT, sockaddr_ptr, socklen_t], ssize_t, save_err=SAVE_ERR) +sendmsg = jit.dont_look_inside(rffi.llexternal("sendmsg_implementation", + [rffi.INT, sockaddr_ptr, socklen_t, rffi.SIGNEDP, rffi.CCHARPP, rffi.INT, + rffi.SIGNEDP, rffi.SIGNEDP, rffi.CCHARPP, rffi.SIGNEDP, rffi.INT, rffi.INT], + rffi.INT, save_err=SAVE_ERR, + compilation_info=compilation_info)) +CMSG_SPACE = jit.dont_look_inside(rffi.llexternal("CMSG_SPACE_wrapper",[size_t], size_t, save_err=SAVE_ERR,compilation_info=compilation_info)) +CMSG_LEN = jit.dont_look_inside(rffi.llexternal("CMSG_LEN_wrapper",[size_t], size_t, save_err=SAVE_ERR,compilation_info=compilation_info)) + socketshutdown = external('shutdown', [socketfd_type, rffi.INT], rffi.INT, save_err=SAVE_ERR) gethostname = external('gethostname', [rffi.CCHARP, rffi.INT], rffi.INT, diff --git 
a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -963,6 +963,104 @@ return (read_bytes, address) raise self.error_handler() + @jit.dont_look_inside + def recvmsg(self, message_size, ancbufsize = 0, flags = 0): + if message_size < 0: + raise RSocketError("Invalid message size") + if ancbufsize < 0: + raise RSocketError("invalid ancillary data buffer length") + + # addr, maxlen = make_null_address(self.family) + # addrlen_p = lltype.malloc(_c.socklen_t_ptr.TO, flavor='raw') + # addrlen_p[0] = rffi.cast(_c.socklen_t, maxlen) + address, addr_p, addrlen_p = self._addrbuf() + len_of_msgs = lltype.malloc(rffi.SIGNEDPP.TO,1,flavor='raw',track_allocation=True,nonmovable=False) + messages = lltype.malloc(rffi.CCHARPP.TO,1,flavor='raw',track_allocation=True,nonmovable=False ) + messages[0] = lltype.malloc(rffi.CCHARP.TO, message_size,flavor='raw',track_allocation=True,nonmovable=False) + rffi.c_memset(messages[0], 0, message_size) + no_of_messages = lltype.malloc(rffi.SIGNEDP.TO,1,flavor='raw',track_allocation=True,nonmovable=False ) + no_of_messages[0] = rffi.cast(rffi.SIGNED, 0) + size_of_anc = lltype.malloc(rffi.SIGNEDP.TO,1,flavor='raw',track_allocation=True,nonmovable=False ) + size_of_anc[0] = rffi.cast(rffi.SIGNED,0) + levels = lltype.malloc(rffi.SIGNEDPP.TO,1,flavor='raw',track_allocation=True,nonmovable=False) + types = lltype.malloc(rffi.SIGNEDPP.TO,1,flavor='raw',track_allocation=True,nonmovable=False) + file_descr = lltype.malloc(rffi.CCHARPP.TO,1,flavor='raw',track_allocation=True,nonmovable=False ) + descr_per_anc = lltype.malloc(rffi.SIGNEDPP.TO,1,flavor='raw',track_allocation=True,nonmovable=False) + retflag = lltype.malloc(rffi.SIGNEDP.TO,1,flavor='raw',track_allocation=True,nonmovable=False ) + retflag[0] = rffi.cast(rffi.SIGNED,0) + + LONG_MASK = 2**32 - 1 + reply = _c.recvmsg(self.fd, rffi.cast(lltype.Signed,message_size), + rffi.cast(lltype.Signed,ancbufsize),rffi.cast(lltype.Signed,flags), 
+ addr_p, addrlen_p, len_of_msgs, messages, no_of_messages,size_of_anc, + levels, types,file_descr,descr_per_anc,retflag) + if reply >= 0: + msg_no = rffi.cast(rffi.SIGNED,no_of_messages[0]) + anc_size = rffi.cast(rffi.SIGNED,size_of_anc[0]) + returnflag = rffi.cast(rffi.SIGNED,retflag[0]) + addrlen = rffi.cast(rffi.SIGNED,addrlen_p[0]) + retmsg = "" + + for i in range(msg_no): + x = rffi.cast(rffi.SIGNED,len_of_msgs[0][i]) + x &= LONG_MASK + retmsg = rffi.charp2strn(messages[0],x) + + offset = 0 + list_of_tuples = [] + for i in range(anc_size): + x = rffi.cast(rffi.SIGNED, levels[0][i]) + x &= LONG_MASK + level = x + x = rffi.cast(rffi.SIGNED,types[0][i]) + x &= LONG_MASK + type = x + x = rffi.cast(rffi.SIGNED,descr_per_anc[0][i]) + x &= LONG_MASK + bytes_in_anc = x + pre_anc = _c.memcpy_from_CCHARP_at_offset(file_descr[0],rffi.cast(rffi.SIGNED,offset), bytes_in_anc) + anc = rffi.charpsize2str(pre_anc,bytes_in_anc) + tup = (level,type, anc) + list_of_tuples.append(tup) + offset += bytes_in_anc + #lltype.free(pre_anc, flavor='raw') + #address.unlock() + if addrlen: + address.addrlen = addrlen + else: + address = None + + + rettup = (retmsg,list_of_tuples,returnflag,address) + + #free underlying complexity first + if address is not None: + address.unlock() + # lltype.free(messages[0],flavor='raw') + _c.freeccharp(file_descr) + _c.freesignedp(len_of_msgs) + _c.freesignedp(levels) + _c.freesignedp(types) + _c.freesignedp(descr_per_anc) + + lltype.free(messages,flavor='raw') + lltype.free(file_descr,flavor='raw') + lltype.free(len_of_msgs,flavor='raw') + lltype.free(no_of_messages, flavor='raw') + lltype.free(size_of_anc, flavor='raw') + lltype.free(levels, flavor='raw') + lltype.free(descr_per_anc, flavor='raw') + lltype.free(retflag, flavor='raw') + lltype.free(addrlen_p,flavor='raw') + + return rettup + else: + if address is not None: + address.unlock() + raise last_error() + + + def send_raw(self, dataptr, length, flags=0): """Send data from a CCHARP buffer.""" 
self.wait_for_data(True) @@ -1009,6 +1107,77 @@ raise self.error_handler() return res + @jit.dont_look_inside + def sendmsg(self, messages, ancillary=None, flags=0, address=None): + # addr = address.lock() + # addrlen = address.addrlen + need_to_free_address = True + if address is None: + need_to_free_address = False + addr = lltype.nullptr(_c.sockaddr) + addrlen = 0 + else: + addr = address.lock() + addrlen = address.addrlen + + no_of_messages = len(messages) + messages_ptr = lltype.malloc(rffi.CCHARPP.TO,no_of_messages+1,flavor='raw',track_allocation=True,nonmovable=False) + messages_length_ptr = lltype.malloc(rffi.SIGNEDP.TO,no_of_messages,flavor='raw',zero=True, track_allocation=True,nonmovable=False) + counter = 0 + for message in messages: + messages_ptr[counter] = rffi.str2charp(message) + messages_length_ptr[counter] = rffi.cast(rffi.SIGNED, len(message)) + #messages_length_ptr[counter] = rffi.cast(rffi.SIGNED, 0x00cabc00abcabc00) + counter += 1 + messages_ptr[counter] = lltype.nullptr(rffi.CCHARP.TO) + if ancillary is not None: + size_of_ancillary = len(ancillary) + else: + size_of_ancillary = 0 + levels = lltype.malloc(rffi.SIGNEDP.TO, size_of_ancillary,flavor='raw',zero=True, track_allocation=True,nonmovable=False) + types = lltype.malloc(rffi.SIGNEDP.TO, size_of_ancillary,flavor='raw',zero=True, track_allocation=True,nonmovable=False) + desc_per_ancillary = lltype.malloc(rffi.SIGNEDP.TO, size_of_ancillary,flavor='raw',zero=True, track_allocation=True,nonmovable=False) + file_descr = lltype.malloc(rffi.CCHARPP.TO, size_of_ancillary,flavor='raw', track_allocation=True,nonmovable=False) + if ancillary is not None: + counter = 0 + for level, type, content in ancillary: + assert isinstance(type,int) + assert isinstance(level, int) + levels[counter] = rffi.cast(rffi.SIGNED,level) + types[counter] = rffi.cast(rffi.SIGNED,type) + desc_per_ancillary[counter] = rffi.cast(rffi.SIGNED, (len(content))) + #file_descr[counter] = 
lltype.malloc(rffi.CCHARP.TO,len(content),flavor='raw',zero=True, track_allocation=True,nonmovable=False) + file_descr[counter] = rffi.str2charp(content, track_allocation=True) + counter +=1 + else: + size_of_ancillary = 0 + snd_no_msgs = rffi.cast(rffi.SIGNED, no_of_messages) + snd_anc_size =rffi.cast(rffi.SIGNED, size_of_ancillary) + bytes_sent = _c.sendmsg(self.fd, addr, addrlen, messages_length_ptr, messages_ptr, snd_no_msgs,levels,types,file_descr,desc_per_ancillary,snd_anc_size,flags) + + + if need_to_free_address: + address.unlock() + for i in range(len(messages)): + lltype.free(messages_ptr[i], flavor='raw', track_allocation=True) + lltype.free(messages_ptr, flavor='raw', track_allocation=True) + lltype.free(messages_length_ptr, flavor='raw', track_allocation=True) + + if size_of_ancillary > 0: + for i in range(len(ancillary)): + lltype.free(file_descr[i], flavor='raw', track_allocation=True) + lltype.free(desc_per_ancillary, flavor='raw', track_allocation=True) + lltype.free(types, flavor='raw', track_allocation=True) + lltype.free(levels, flavor='raw', track_allocation=True) + lltype.free(file_descr, flavor='raw', track_allocation=True) + + if (bytes_sent < 0) and (bytes_sent!=-1000) and (bytes_sent!=-1001) and (bytes_sent!=-1002): + raise last_error() + + return bytes_sent + + + def setblocking(self, block): if block: timeout = -1.0 @@ -1190,6 +1359,19 @@ return (make_socket(fd0, family, type, proto, SocketClass), make_socket(fd1, family, type, proto, SocketClass)) +if _c._POSIX: + def CMSG_LEN( demanded_len): + if demanded_len < 0: + return 0 + result = _c.CMSG_LEN(demanded_len) + return result + + def CMSG_SPACE( demanded_size): + if demanded_size < 0: + return 0 + result = _c.CMSG_SPACE(demanded_size) + return result + if _c.WIN32: def dup(fd, inheritable=True): with lltype.scoped_alloc(_c.WSAPROTOCOL_INFO, zero=True) as info: diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- 
a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -752,7 +752,8 @@ # Signed, Signed * SIGNED = lltype.Signed -SIGNEDP = lltype.Ptr(lltype.Array(SIGNED, hints={'nolength': True})) +SIGNEDP = lltype.Ptr(lltype.Array(lltype.Signed, hints={'nolength': True})) +SIGNEDPP = lltype.Ptr(lltype.Array(SIGNEDP, hints={'nolength': True})) # various type mapping From pypy.commits at gmail.com Tue Jul 18 06:02:39 2017 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 18 Jul 2017 03:02:39 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in Dodan/pgo_clang_support/Enable_PGO_for_clang (pull request #554) Message-ID: <596ddcbf.15ae1c0a.4b2df.6054@mx.google.com> Author: Carl Friedrich Bolz-Tereick Branch: Changeset: r91917:75bf1a95154b Date: 2017-07-18 10:02 +0000 http://bitbucket.org/pypy/pypy/changeset/75bf1a95154b/ Log: Merged in Dodan/pgo_clang_support/Enable_PGO_for_clang (pull request #554) Enable PGO for CLang (there's code in CPython that does the equivalent roughly here: http s://github.com/python/cpython/blob/6b42eb17649bed9615b6e6cecaefdb2f4 6990b2c/configure#L6753 ) diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -382,6 +382,80 @@ if self.config.translation.profopt: if self.config.translation.profoptargs is None: raise Exception("No profoptargs specified, neither in the command line, nor in the target. 
If the target is not PyPy, please specify profoptargs") + + # Set the correct PGO params based on OS and CC + profopt_gen_flag = "" + profopt_use_flag = "" + profopt_merger = "" + profopt_file = "" + llvm_profdata = "" + + cc = self.translator.platform.cc + + # Locate llvm-profdata + if "clang" in cc: + clang_bin = cc + path = os.environ.get("PATH").split(":") + profdata_found = False + + # Try to find it in $PATH (Darwin and Linux) + for dir in path: + bin = "%s/llvm-profdata" % dir + if os.path.isfile(bin): + llvm_profdata = bin + profdata_found = True + break + + # If not found, try to find it where clang is actually installed (Darwin and Linux) + if not profdata_found: + # If the full path is not given, find where clang is located + if not os.path.isfile(clang_bin): + for dir in path: + bin = "%s/%s" % (dir, cc) + if os.path.isfile(bin): + clang_bin = bin + break + # Some systems install clang elsewhere as a symlink to the real path, + # which is where the related llvm tools are located. + if os.path.islink(clang_bin): + clang_bin = os.path.realpath(clang_bin) # the real clang binary + # llvm-profdata must be in the same directory as clang + llvm_profdata = "%s/llvm-profdata" % os.path.dirname(clang_bin) + profdata_found = os.path.isfile(llvm_profdata) + + # If not found, and Darwin is used, try to find it in the development environment + # More: https://apple.stackexchange.com/questions/197053/ + if not profdata_found and sys.platform == 'darwin': + code = os.system("/usr/bin/xcrun -find llvm-profdata 2>/dev/null") + if code == 0: + llvm_profdata = "/usr/bin/xcrun llvm-profdata" + profdata_found = True + + # If everything failed, throw Exception, sorry + if not profdata_found: + raise Exception( + "Error: Cannot perform profopt build because llvm-profdata was not found in PATH. 
" + "Please add it to PATH and run the translation again.") + + # Set the PGO flags + if "clang" in cc: + # Any changes made here should be reflected in the GCC+Darwin case below + profopt_gen_flag = "-fprofile-instr-generate" + profopt_use_flag = "-fprofile-instr-use=code.profclangd" + profopt_merger = "%s merge -output=code.profclangd *.profclangr" % llvm_profdata + profopt_file = 'LLVM_PROFILE_FILE="code-%p.profclangr"' + elif "gcc" in cc: + if sys.platform == 'darwin': + profopt_gen_flag = "-fprofile-instr-generate" + profopt_use_flag = "-fprofile-instr-use=code.profclangd" + profopt_merger = "%s merge -output=code.profclangd *.profclangr" % llvm_profdata + profopt_file = 'LLVM_PROFILE_FILE="code-%p.profclangr"' + else: + profopt_gen_flag = "-fprofile-generate" + profopt_use_flag = "-fprofile-use -fprofile-correction" + profopt_merger = "true" + profopt_file = "" + if self.config.translation.shared: mk.rule('$(PROFOPT_TARGET)', '$(TARGET) main.o', '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. 
-l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS) -lgcov') @@ -390,10 +464,11 @@ rules.append( ('profopt', '', [ - '$(MAKE) CFLAGS="-fprofile-generate -fPIC $(CFLAGS) -fno-lto" LDFLAGS="-fprofile-generate $(LDFLAGS) -fno-lto" $(PROFOPT_TARGET)', - '%s %s ' % (exe_name, self.config.translation.profoptargs), + '$(MAKE) CFLAGS="%s -fPIC $(CFLAGS)" LDFLAGS="%s $(LDFLAGS)" $(PROFOPT_TARGET)' % (profopt_gen_flag, profopt_gen_flag), + '%s %s %s ' % (profopt_file, exe_name, self.config.translation.profoptargs), + '%s' % (profopt_merger), '$(MAKE) clean_noprof', - '$(MAKE) CFLAGS="-fprofile-use -fprofile-correction -fPIC $(CFLAGS) -fno-lto" LDFLAGS="-fprofile-use $(LDFLAGS) -fno-lto" $(PROFOPT_TARGET)', + '$(MAKE) CFLAGS="%s -fPIC $(CFLAGS)" LDFLAGS="%s $(LDFLAGS)" $(PROFOPT_TARGET)' % (profopt_use_flag, profopt_use_flag), ])) for rule in rules: From pypy.commits at gmail.com Tue Jul 18 06:02:37 2017 From: pypy.commits at gmail.com (Dodan) Date: Tue, 18 Jul 2017 03:02:37 -0700 (PDT) Subject: [pypy-commit] pypy Enable_PGO_for_clang: Enable PGO for CLang Message-ID: <596ddcbd.1a6d1c0a.df0d5.813c@mx.google.com> Author: Dodan Mihai Branch: Enable_PGO_for_clang Changeset: r91916:10da3d6507ca Date: 2017-06-14 13:33 +0300 http://bitbucket.org/pypy/pypy/changeset/10da3d6507ca/ Log: Enable PGO for CLang diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -382,6 +382,80 @@ if self.config.translation.profopt: if self.config.translation.profoptargs is None: raise Exception("No profoptargs specified, neither in the command line, nor in the target. 
If the target is not PyPy, please specify profoptargs") + + # Set the correct PGO params based on OS and CC + profopt_gen_flag = "" + profopt_use_flag = "" + profopt_merger = "" + profopt_file = "" + llvm_profdata = "" + + cc = self.translator.platform.cc + + # Locate llvm-profdata + if "clang" in cc: + clang_bin = cc + path = os.environ.get("PATH").split(":") + profdata_found = False + + # Try to find it in $PATH (Darwin and Linux) + for dir in path: + bin = "%s/llvm-profdata" % dir + if os.path.isfile(bin): + llvm_profdata = bin + profdata_found = True + break + + # If not found, try to find it where clang is actually installed (Darwin and Linux) + if not profdata_found: + # If the full path is not given, find where clang is located + if not os.path.isfile(clang_bin): + for dir in path: + bin = "%s/%s" % (dir, cc) + if os.path.isfile(bin): + clang_bin = bin + break + # Some systems install clang elsewhere as a symlink to the real path, + # which is where the related llvm tools are located. + if os.path.islink(clang_bin): + clang_bin = os.path.realpath(clang_bin) # the real clang binary + # llvm-profdata must be in the same directory as clang + llvm_profdata = "%s/llvm-profdata" % os.path.dirname(clang_bin) + profdata_found = os.path.isfile(llvm_profdata) + + # If not found, and Darwin is used, try to find it in the development environment + # More: https://apple.stackexchange.com/questions/197053/ + if not profdata_found and sys.platform == 'darwin': + code = os.system("/usr/bin/xcrun -find llvm-profdata 2>/dev/null") + if code == 0: + llvm_profdata = "/usr/bin/xcrun llvm-profdata" + profdata_found = True + + # If everything failed, throw Exception, sorry + if not profdata_found: + raise Exception( + "Error: Cannot perform profopt build because llvm-profdata was not found in PATH. 
" + "Please add it to PATH and run the translation again.") + + # Set the PGO flags + if "clang" in cc: + # Any changes made here should be reflected in the GCC+Darwin case below + profopt_gen_flag = "-fprofile-instr-generate" + profopt_use_flag = "-fprofile-instr-use=code.profclangd" + profopt_merger = "%s merge -output=code.profclangd *.profclangr" % llvm_profdata + profopt_file = 'LLVM_PROFILE_FILE="code-%p.profclangr"' + elif "gcc" in cc: + if sys.platform == 'darwin': + profopt_gen_flag = "-fprofile-instr-generate" + profopt_use_flag = "-fprofile-instr-use=code.profclangd" + profopt_merger = "%s merge -output=code.profclangd *.profclangr" % llvm_profdata + profopt_file = 'LLVM_PROFILE_FILE="code-%p.profclangr"' + else: + profopt_gen_flag = "-fprofile-generate" + profopt_use_flag = "-fprofile-use -fprofile-correction" + profopt_merger = "true" + profopt_file = "" + if self.config.translation.shared: mk.rule('$(PROFOPT_TARGET)', '$(TARGET) main.o', '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. 
-l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS) -lgcov') @@ -390,10 +464,11 @@ rules.append( ('profopt', '', [ - '$(MAKE) CFLAGS="-fprofile-generate -fPIC $(CFLAGS) -fno-lto" LDFLAGS="-fprofile-generate $(LDFLAGS) -fno-lto" $(PROFOPT_TARGET)', - '%s %s ' % (exe_name, self.config.translation.profoptargs), + '$(MAKE) CFLAGS="%s -fPIC $(CFLAGS)" LDFLAGS="%s $(LDFLAGS)" $(PROFOPT_TARGET)' % (profopt_gen_flag, profopt_gen_flag), + '%s %s %s ' % (profopt_file, exe_name, self.config.translation.profoptargs), + '%s' % (profopt_merger), '$(MAKE) clean_noprof', - '$(MAKE) CFLAGS="-fprofile-use -fprofile-correction -fPIC $(CFLAGS) -fno-lto" LDFLAGS="-fprofile-use $(LDFLAGS) -fno-lto" $(PROFOPT_TARGET)', + '$(MAKE) CFLAGS="%s -fPIC $(CFLAGS)" LDFLAGS="%s $(LDFLAGS)" $(PROFOPT_TARGET)' % (profopt_use_flag, profopt_use_flag), ])) for rule in rules: From pypy.commits at gmail.com Tue Jul 18 13:38:25 2017 From: pypy.commits at gmail.com (mjacob) Date: Tue, 18 Jul 2017 10:38:25 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Add first test for aclose() on async generator and make it pass. Message-ID: <596e4791.b485df0a.32022.2229@mx.google.com> Author: Manuel Jacob Branch: py3.6 Changeset: r91918:a52d37ceba11 Date: 2017-07-18 19:35 +0200 http://bitbucket.org/pypy/pypy/changeset/a52d37ceba11/ Log: Add first test for aclose() on async generator and make it pass. 
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -602,7 +602,7 @@ return AsyncGenAThrow(self, w_type, w_val, w_tb) def descr_aclose(self): - XXX + return AsyncGenAThrow(self, None, None, None) class AsyncGenValueWrapper(W_Root): @@ -709,14 +709,30 @@ try: if throwing: - w_value = self.async_gen.throw(self.w_exc_type, - self.w_exc_value, - self.w_exc_tb) + if self.w_exc_type is None: + w_value = self.async_gen.throw(space.w_GeneratorExit, + None, None) + if w_value is not None: + XXX + else: + w_value = self.async_gen.throw(self.w_exc_type, + self.w_exc_value, + self.w_exc_tb) else: w_value = self.async_gen.send_ex(w_arg_or_err) return self.unwrap_value(w_value) except OperationError as e: - self.state = self.ST_CLOSED + if e.match(space, space.w_StopAsyncIteration): + self.state = self.ST_CLOSED + if self.w_exc_type is None: + # When aclose() is called we don't want to propagate + # StopAsyncIteration; just raise StopIteration, signalling + # that 'aclose()' is done. + raise OperationError(space.w_StopIteration, space.w_None) + if e.match(space, space.w_GeneratorExit): + self.state = self.ST_CLOSED + # Ignore this error. 
+ raise OperationError(space.w_StopIteration, space.w_None) raise def descr_throw(self, w_type, w_val=None, w_tb=None): diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -448,3 +448,25 @@ except StopIteration: assert values == [42] """ + + def test_async_aclose(self): """ + raises_generator_exit = False + async def ag(): + nonlocal raises_generator_exit + try: + yield + except GeneratorExit: + raises_generator_exit = True + raise + + async def run(): + a = ag() + async for i in a: + break + await a.aclose() + try: + run().send(None) + except StopIteration: + pass + assert raises_generator_exit + """ From pypy.commits at gmail.com Tue Jul 18 13:57:01 2017 From: pypy.commits at gmail.com (mjacob) Date: Tue, 18 Jul 2017 10:57:01 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Add test and implementation for async generator ignoring GeneratorExit. Message-ID: <596e4bed.ce8c1c0a.bf2d8.cffb@mx.google.com> Author: Manuel Jacob Branch: py3.6 Changeset: r91919:958a9c75a085 Date: 2017-07-18 19:56 +0200 http://bitbucket.org/pypy/pypy/changeset/958a9c75a085/ Log: Add test and implementation for async generator ignoring GeneratorExit. 
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -713,7 +713,8 @@ w_value = self.async_gen.throw(space.w_GeneratorExit, None, None) if w_value is not None: - XXX + raise oefmt(space.w_RuntimeError, + "async generator ignored GeneratorExit") else: w_value = self.async_gen.throw(self.w_exc_type, self.w_exc_value, diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -470,3 +470,18 @@ pass assert raises_generator_exit """ + + def test_async_aclose_ignore_generator_exit(self): """ + async def ag(): + try: + yield + except GeneratorExit: + yield + + async def run(): + a = ag() + async for i in a: + break + await a.aclose() + raises(RuntimeError, run().send, None) + """ From pypy.commits at gmail.com Tue Jul 18 18:22:56 2017 From: pypy.commits at gmail.com (mjacob) Date: Tue, 18 Jul 2017 15:22:56 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Add a TODO. Message-ID: <596e8a40.c7331c0a.5372f.ac19@mx.google.com> Author: Manuel Jacob Branch: py3.6 Changeset: r91920:7f4ec0d17965 Date: 2017-07-18 19:59 +0200 http://bitbucket.org/pypy/pypy/changeset/7f4ec0d17965/ Log: Add a TODO. diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -710,6 +710,7 @@ try: if throwing: if self.w_exc_type is None: + # TODO: add equivalent to CPython's o->agt_gen->ag_closed = 1; w_value = self.async_gen.throw(space.w_GeneratorExit, None, None) if w_value is not None: From pypy.commits at gmail.com Tue Jul 18 18:22:58 2017 From: pypy.commits at gmail.com (mjacob) Date: Tue, 18 Jul 2017 15:22:58 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Implement AsyncGenABase.descr_close(). Translation should work again! 
Message-ID: <596e8a42.45091c0a.51d17.ce56@mx.google.com> Author: Manuel Jacob Branch: py3.6 Changeset: r91921:3de6fb333784 Date: 2017-07-19 00:21 +0200 http://bitbucket.org/pypy/pypy/changeset/3de6fb333784/ Log: Implement AsyncGenABase.descr_close(). Translation should work again! diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -642,7 +642,7 @@ raise def descr_close(self): - XXX + self.state = self.ST_CLOSED def unwrap_value(self, w_value): if isinstance(w_value, AsyncGenValueWrapper): diff --git a/pypy/interpreter/test/test_coroutine.py b/pypy/interpreter/test/test_coroutine.py --- a/pypy/interpreter/test/test_coroutine.py +++ b/pypy/interpreter/test/test_coroutine.py @@ -485,3 +485,17 @@ await a.aclose() raises(RuntimeError, run().send, None) """ + + def test_async_anext_close(self): """ + async def ag(): + yield 42 + + an = ag().__anext__() + an.close() + try: + next(an) + except StopIteration as e: + assert e.value is None + else: + assert False, "didn't raise" + """ From pypy.commits at gmail.com Wed Jul 19 10:15:45 2017 From: pypy.commits at gmail.com (smihnea) Date: Wed, 19 Jul 2017 07:15:45 -0700 (PDT) Subject: [pypy-commit] pypy pypy_bytearray: bytearray performance fix(ported from PyPy3) Message-ID: <596f6991.2283df0a.4ea3a.a149@mx.google.com> Author: Mihnea Saracin Branch: pypy_bytearray Changeset: r91922:b43a6e2c0ea1 Date: 2017-07-19 13:25 +0300 http://bitbucket.org/pypy/pypy/changeset/b43a6e2c0ea1/ Log: bytearray performance fix(ported from PyPy3) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -567,24 +567,16 @@ raise else: return list(buf.as_str()) + return _from_byte_sequence(space, w_source) - # sequence of bytes - w_iter = space.iter(w_source) - length_hint = space.length_hint(w_source, 0) - data = 
newlist_hint(length_hint) - extended = 0 - while True: - try: - w_item = space.next(w_iter) - except OperationError as e: - if not e.match(space, space.w_StopIteration): - raise - break - data.append(space.byte_w(w_item)) - extended += 1 - if extended < length_hint: - resizelist_hint(data, extended) - return data +def _from_byte_sequence(space, w_source): + # Split off in a separate function for the JIT's benefit + w_result = space.appexec([w_source], """(seq): + result = bytearray() + for i in seq: + result.append(i) + return result""") + return w_result.getdata() def _hex_digit_to_int(d): From pypy.commits at gmail.com Wed Jul 19 10:15:48 2017 From: pypy.commits at gmail.com (rlamy) Date: Wed, 19 Jul 2017 07:15:48 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in smihnea/pypy_bytearray/pypy_bytearray (pull request #559) Message-ID: <596f6994.08891c0a.7a772.0b74@mx.google.com> Author: Ronan Lamy Branch: Changeset: r91923:94b71132a96e Date: 2017-07-19 14:15 +0000 http://bitbucket.org/pypy/pypy/changeset/94b71132a96e/ Log: Merged in smihnea/pypy_bytearray/pypy_bytearray (pull request #559) bytearray performance fix(ported from PyPy3) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -567,24 +567,16 @@ raise else: return list(buf.as_str()) + return _from_byte_sequence(space, w_source) - # sequence of bytes - w_iter = space.iter(w_source) - length_hint = space.length_hint(w_source, 0) - data = newlist_hint(length_hint) - extended = 0 - while True: - try: - w_item = space.next(w_iter) - except OperationError as e: - if not e.match(space, space.w_StopIteration): - raise - break - data.append(space.byte_w(w_item)) - extended += 1 - if extended < length_hint: - resizelist_hint(data, extended) - return data +def _from_byte_sequence(space, w_source): + # Split off in a separate function for the JIT's benefit + w_result = 
space.appexec([w_source], """(seq): + result = bytearray() + for i in seq: + result.append(i) + return result""") + return w_result.getdata() def _hex_digit_to_int(d): From pypy.commits at gmail.com Wed Jul 19 12:29:44 2017 From: pypy.commits at gmail.com (rlamy) Date: Wed, 19 Jul 2017 09:29:44 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Add missing function PyObject_LengthHint Message-ID: <596f88f8.17addf0a.85ea3.0fe7@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91924:85b1a22bbd65 Date: 2017-07-19 18:29 +0200 http://bitbucket.org/pypy/pypy/changeset/85b1a22bbd65/ Log: Add missing function PyObject_LengthHint diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -462,6 +462,12 @@ fwrite(buf, 1, count, fp) return 0 + at cts.decl(""" + Py_ssize_t PyObject_LengthHint(PyObject *o, Py_ssize_t defaultvalue)""", + error=-1) +def PyObject_LengthHint(space, w_o, defaultvalue): + return space.length_hint(w_o, defaultvalue) + @cpython_api([lltype.Signed], lltype.Void) def _PyPyGC_AddMemoryPressure(space, report): from rpython.rlib import rgc diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -349,6 +349,27 @@ assert type(module.asbytes(sub1(b''))) is bytes assert type(module.asbytes(sub2(b''))) is sub2 + def test_LengthHint(self): + import operator + class WithLen: + def __len__(self): + return 1 + def __length_hint__(self): + return 42 + class NoLen: + def __length_hint__(self): + return 2 + module = self.import_extension('test_LengthHint', [ + ('length_hint', 'METH_VARARGS', + """ + PyObject *obj = PyTuple_GET_ITEM(args, 0); + Py_ssize_t i = PyLong_AsSsize_t(PyTuple_GET_ITEM(args, 1)); + return PyLong_FromSsize_t(PyObject_LengthHint(obj, i)); + """)]) + assert module.length_hint(WithLen(), 5) == operator.length_hint(WithLen(), 5) == 
1 + assert module.length_hint(NoLen(), 5) == operator.length_hint(NoLen(), 5) == 2 + assert module.length_hint(object(), 5) == operator.length_hint(object(), 5) == 5 + def test_add_memory_pressure(self): self.reset_memory_pressure() # for the potential skip module = self.import_extension('foo', [ @@ -528,4 +549,3 @@ Py_RETURN_NONE; """)]) assert module.release() is None - From pypy.commits at gmail.com Wed Jul 19 12:59:08 2017 From: pypy.commits at gmail.com (rlamy) Date: Wed, 19 Jul 2017 09:59:08 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <596f8fdc.52851c0a.54def.2d1c@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91925:71fecdeaec70 Date: 2017-07-19 18:58 +0200 http://bitbucket.org/pypy/pypy/changeset/71fecdeaec70/ Log: hg merge default diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -382,6 +382,80 @@ if self.config.translation.profopt: if self.config.translation.profoptargs is None: raise Exception("No profoptargs specified, neither in the command line, nor in the target. 
If the target is not PyPy, please specify profoptargs") + + # Set the correct PGO params based on OS and CC + profopt_gen_flag = "" + profopt_use_flag = "" + profopt_merger = "" + profopt_file = "" + llvm_profdata = "" + + cc = self.translator.platform.cc + + # Locate llvm-profdata + if "clang" in cc: + clang_bin = cc + path = os.environ.get("PATH").split(":") + profdata_found = False + + # Try to find it in $PATH (Darwin and Linux) + for dir in path: + bin = "%s/llvm-profdata" % dir + if os.path.isfile(bin): + llvm_profdata = bin + profdata_found = True + break + + # If not found, try to find it where clang is actually installed (Darwin and Linux) + if not profdata_found: + # If the full path is not given, find where clang is located + if not os.path.isfile(clang_bin): + for dir in path: + bin = "%s/%s" % (dir, cc) + if os.path.isfile(bin): + clang_bin = bin + break + # Some systems install clang elsewhere as a symlink to the real path, + # which is where the related llvm tools are located. + if os.path.islink(clang_bin): + clang_bin = os.path.realpath(clang_bin) # the real clang binary + # llvm-profdata must be in the same directory as clang + llvm_profdata = "%s/llvm-profdata" % os.path.dirname(clang_bin) + profdata_found = os.path.isfile(llvm_profdata) + + # If not found, and Darwin is used, try to find it in the development environment + # More: https://apple.stackexchange.com/questions/197053/ + if not profdata_found and sys.platform == 'darwin': + code = os.system("/usr/bin/xcrun -find llvm-profdata 2>/dev/null") + if code == 0: + llvm_profdata = "/usr/bin/xcrun llvm-profdata" + profdata_found = True + + # If everything failed, throw Exception, sorry + if not profdata_found: + raise Exception( + "Error: Cannot perform profopt build because llvm-profdata was not found in PATH. 
" + "Please add it to PATH and run the translation again.") + + # Set the PGO flags + if "clang" in cc: + # Any changes made here should be reflected in the GCC+Darwin case below + profopt_gen_flag = "-fprofile-instr-generate" + profopt_use_flag = "-fprofile-instr-use=code.profclangd" + profopt_merger = "%s merge -output=code.profclangd *.profclangr" % llvm_profdata + profopt_file = 'LLVM_PROFILE_FILE="code-%p.profclangr"' + elif "gcc" in cc: + if sys.platform == 'darwin': + profopt_gen_flag = "-fprofile-instr-generate" + profopt_use_flag = "-fprofile-instr-use=code.profclangd" + profopt_merger = "%s merge -output=code.profclangd *.profclangr" % llvm_profdata + profopt_file = 'LLVM_PROFILE_FILE="code-%p.profclangr"' + else: + profopt_gen_flag = "-fprofile-generate" + profopt_use_flag = "-fprofile-use -fprofile-correction" + profopt_merger = "true" + profopt_file = "" + if self.config.translation.shared: mk.rule('$(PROFOPT_TARGET)', '$(TARGET) main.o', '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. 
-l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS) -lgcov') @@ -390,10 +464,11 @@ rules.append( ('profopt', '', [ - '$(MAKE) CFLAGS="-fprofile-generate -fPIC $(CFLAGS) -fno-lto" LDFLAGS="-fprofile-generate $(LDFLAGS) -fno-lto" $(PROFOPT_TARGET)', - '%s %s ' % (exe_name, self.config.translation.profoptargs), + '$(MAKE) CFLAGS="%s -fPIC $(CFLAGS)" LDFLAGS="%s $(LDFLAGS)" $(PROFOPT_TARGET)' % (profopt_gen_flag, profopt_gen_flag), + '%s %s %s ' % (profopt_file, exe_name, self.config.translation.profoptargs), + '%s' % (profopt_merger), '$(MAKE) clean_noprof', - '$(MAKE) CFLAGS="-fprofile-use -fprofile-correction -fPIC $(CFLAGS) -fno-lto" LDFLAGS="-fprofile-use $(LDFLAGS) -fno-lto" $(PROFOPT_TARGET)', + '$(MAKE) CFLAGS="%s -fPIC $(CFLAGS)" LDFLAGS="%s $(LDFLAGS)" $(PROFOPT_TARGET)' % (profopt_use_flag, profopt_use_flag), ])) for rule in rules: From pypy.commits at gmail.com Wed Jul 19 16:46:40 2017 From: pypy.commits at gmail.com (wlav) Date: Wed, 19 Jul 2017 13:46:40 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: str_w -> bytes_w/text_w Message-ID: <596fc530.55a4df0a.c9ec2.d12c@mx.google.com> Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r91928:f31bd1042c09 Date: 2017-07-18 17:25 -0700 http://bitbucket.org/pypy/pypy/changeset/f31bd1042c09/ Log: str_w -> bytes_w/text_w diff --git a/pypy/module/_cppyy/test/test_zjit.py b/pypy/module/_cppyy/test/test_zjit.py --- a/pypy/module/_cppyy/test/test_zjit.py +++ b/pypy/module/_cppyy/test/test_zjit.py @@ -212,7 +212,11 @@ assert isinstance(w_obj, FakeLong) return rarithmetic.r_uint(w_obj.val.touint()) - def str_w(self, w_obj): + def bytes_w(self, w_obj): + assert isinstance(w_obj, FakeString) + return w_obj.val + + def text_w(self, w_obj): assert isinstance(w_obj, FakeString) return w_obj.val From pypy.commits at gmail.com Wed Jul 19 16:46:42 2017 From: pypy.commits at gmail.com (wlav) Date: Wed, 19 Jul 2017 13:46:42 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: fix indirection error Message-ID: 
<596fc532.2684df0a.fb663.749a@mx.google.com> Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r91929:0b40d2587588 Date: 2017-07-18 17:42 -0700 http://bitbucket.org/pypy/pypy/changeset/0b40d2587588/ Log: fix indirection error diff --git a/pypy/module/_cppyy/ffitypes.py b/pypy/module/_cppyy/ffitypes.py --- a/pypy/module/_cppyy/ffitypes.py +++ b/pypy/module/_cppyy/ffitypes.py @@ -82,12 +82,13 @@ value = rffi.cast(rffi.CHAR, space.c_int_w(w_value)) else: - value = space.bytes_w(w_value) + value = space.text_w(w_value) + if len(value) != 1: + raise oefmt(space.w_ValueError, + "char expected, got string of size %d", len(value)) + value = rffi.cast(rffi.CHAR, value[0]) - if len(value) != 1: - raise oefmt(space.w_ValueError, - "char expected, got string of size %d", len(value)) - return value[0] # turn it into a "char" to the annotator + return value # turn it into a "char" to the annotator def cffi_type(self, space): state = space.fromcache(State) From pypy.commits at gmail.com Wed Jul 19 16:46:36 2017 From: pypy.commits at gmail.com (wlav) Date: Wed, 19 Jul 2017 13:46:36 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: move cppyy -> _cppyy and remove test_cint.py (obsolete) Message-ID: <596fc52c.c2b81c0a.142ed.501e@mx.google.com> Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r91926:e7b3734b419c Date: 2017-07-18 16:26 -0700 http://bitbucket.org/pypy/pypy/changeset/e7b3734b419c/ Log: move cppyy -> _cppyy and remove test_cint.py (obsolete) diff too long, truncating to 2000 out of 3264 lines diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -36,7 +36,7 @@ "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", - "_csv", "cppyy", "_pypyjson", "_jitlog" + "_csv", "_cppyy", "_pypyjson", "_jitlog" ]) from rpython.jit.backend 
import detect_cpu @@ -67,8 +67,8 @@ if name in translation_modules: translation_modules.remove(name) - if "cppyy" in working_modules: - working_modules.remove("cppyy") # not tested on win32 + if "_cppyy" in working_modules: + working_modules.remove("_cppyy") # not tested on win32 if "faulthandler" in working_modules: working_modules.remove("faulthandler") # missing details @@ -79,8 +79,8 @@ working_modules.remove('fcntl') # LOCK_NB not defined working_modules.remove("_minimal_curses") working_modules.remove("termios") - if "cppyy" in working_modules: - working_modules.remove("cppyy") # depends on ctypes + if "_cppyy" in working_modules: + working_modules.remove("_cppyy") # depends on ctypes #if sys.platform.startswith("linux"): # _mach = os.popen('uname -m', 'r').read().strip() @@ -92,7 +92,7 @@ '_multiprocessing': [('objspace.usemodules.time', True), ('objspace.usemodules.thread', True)], 'cpyext': [('objspace.usemodules.array', True)], - 'cppyy': [('objspace.usemodules.cpyext', True)], + '_cppyy': [('objspace.usemodules.cpyext', True)], 'faulthandler': [('objspace.usemodules._vmprof', True)], } module_suggests = { diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/_cppyy/__init__.py rename from pypy/module/cppyy/__init__.py rename to pypy/module/_cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/_cppyy/__init__.py @@ -33,11 +33,11 @@ # pythonization functions may be written in RPython, but the interp2app # code generation is not, so give it a chance to run now - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi capi.register_pythonizations(space) def startup(self, space): - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi capi.verify_backend(space) # may raise ImportError space.call_method(self, '_init_pythonify') diff --git a/pypy/module/cppyy/backend/create_cppyy_package.py b/pypy/module/_cppyy/backend/create_cppyy_package.py rename from pypy/module/cppyy/backend/create_cppyy_package.py 
rename to pypy/module/_cppyy/backend/create_cppyy_package.py diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/_cppyy/bench/Makefile rename from pypy/module/cppyy/bench/Makefile rename to pypy/module/_cppyy/bench/Makefile diff --git a/pypy/module/cppyy/bench/bench02.cxx b/pypy/module/_cppyy/bench/bench02.cxx rename from pypy/module/cppyy/bench/bench02.cxx rename to pypy/module/_cppyy/bench/bench02.cxx diff --git a/pypy/module/cppyy/bench/bench02.h b/pypy/module/_cppyy/bench/bench02.h rename from pypy/module/cppyy/bench/bench02.h rename to pypy/module/_cppyy/bench/bench02.h diff --git a/pypy/module/cppyy/bench/bench02.xml b/pypy/module/_cppyy/bench/bench02.xml rename from pypy/module/cppyy/bench/bench02.xml rename to pypy/module/_cppyy/bench/bench02.xml diff --git a/pypy/module/cppyy/bench/hsimple.C b/pypy/module/_cppyy/bench/hsimple.C rename from pypy/module/cppyy/bench/hsimple.C rename to pypy/module/_cppyy/bench/hsimple.C diff --git a/pypy/module/cppyy/bench/hsimple.py b/pypy/module/_cppyy/bench/hsimple.py rename from pypy/module/cppyy/bench/hsimple.py rename to pypy/module/_cppyy/bench/hsimple.py diff --git a/pypy/module/cppyy/bench/hsimple_rflx.py b/pypy/module/_cppyy/bench/hsimple_rflx.py rename from pypy/module/cppyy/bench/hsimple_rflx.py rename to pypy/module/_cppyy/bench/hsimple_rflx.py diff --git a/pypy/module/cppyy/capi/__init__.py b/pypy/module/_cppyy/capi/__init__.py rename from pypy/module/cppyy/capi/__init__.py rename to pypy/module/_cppyy/capi/__init__.py --- a/pypy/module/cppyy/capi/__init__.py +++ b/pypy/module/_cppyy/capi/__init__.py @@ -9,10 +9,10 @@ # the selection of the desired backend (default is Reflex). 
# choose C-API access method: -from pypy.module.cppyy.capi.loadable_capi import * -#from pypy.module.cppyy.capi.builtin_capi import * +from pypy.module._cppyy.capi.loadable_capi import * +#from pypy.module._cppyy.capi.builtin_capi import * -from pypy.module.cppyy.capi.capi_types import C_OBJECT,\ +from pypy.module._cppyy.capi.capi_types import C_OBJECT,\ C_NULL_TYPE, C_NULL_OBJECT def direct_ptradd(ptr, offset): diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/_cppyy/capi/builtin_capi.py rename from pypy/module/cppyy/capi/builtin_capi.py rename to pypy/module/_cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/_cppyy/capi/builtin_capi.py @@ -4,7 +4,7 @@ import cling_capi as backend -from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ +from pypy.module._cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR identify = backend.identify diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/_cppyy/capi/capi_types.py rename from pypy/module/cppyy/capi/capi_types.py rename to pypy/module/_cppyy/capi/capi_types.py diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/_cppyy/capi/cling_capi.py rename from pypy/module/cppyy/capi/cling_capi.py rename to pypy/module/_cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/_cppyy/capi/cling_capi.py @@ -11,7 +11,7 @@ from rpython.rlib import jit, libffi, rdynload from pypy.module._rawffi.array import W_ArrayInstance -from pypy.module.cppyy.capi.capi_types import C_OBJECT +from pypy.module._cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -99,7 +99,7 @@ def stdstring_c_str(space, w_self): """Return a python string taking into account \0""" - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppstr = space.interp_w(interp_cppyy.W_CPPInstance, 
w_self, can_be_None=False) return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) @@ -112,12 +112,12 @@ W_AbstractSeqIterObject.__init__(self, w_vector) # TODO: this should live in rpythonize.py or something so that the # imports can move to the top w/o getting circles - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy assert isinstance(w_vector, interp_cppyy.W_CPPInstance) vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) self.overload = vector.cppclass.get_overload("__getitem__") - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) @@ -131,7 +131,7 @@ self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) - from pypy.module.cppyy import converter + from pypy.module._cppyy import converter self.converter = converter.get_converter(space, v_type, '') self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) self.stride = v_size @@ -143,7 +143,7 @@ self.w_seq = None raise OperationError(space.w_StopIteration, space.w_None) try: - from pypy.module.cppyy import capi # TODO: refector + from pypy.module._cppyy import capi # TODO: refector offset = capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) except OperationError as e: @@ -186,7 +186,7 @@ _method_alias(space, w_pycppclass, "__str__", "c_str") if "vector" in name[:11]: # len('std::vector') == 11 - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi v_type = capi.c_stdvector_valuetype(space, name) if v_type: space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/_cppyy/capi/loadable_capi.py rename from pypy/module/cppyy/capi/loadable_capi.py rename to 
pypy/module/_cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/_cppyy/capi/loadable_capi.py @@ -9,9 +9,9 @@ from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc from pypy.module._cffi_backend import newtype -from pypy.module.cppyy import ffitypes +from pypy.module._cppyy import ffitypes -from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ +from pypy.module._cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR @@ -599,7 +599,7 @@ def stdstring_c_str(space, w_self): """Return a python string taking into account \0""" - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) return space.newtext(c_stdstring2charp(space, cppstr._rawobject)) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/_cppyy/converter.py rename from pypy/module/cppyy/converter.py rename to pypy/module/_cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/_cppyy/converter.py @@ -9,7 +9,7 @@ from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance -from pypy.module.cppyy import helper, capi, ffitypes +from pypy.module._cppyy import helper, capi, ffitypes # Converter objects are used to translate between RPython and C++. They are # defined by the type name for which they provide conversion. 
Uses are for @@ -22,7 +22,7 @@ def get_rawobject(space, w_obj): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) if cppinstance: rawobject = cppinstance.get_rawobject() @@ -31,14 +31,14 @@ return capi.C_NULL_OBJECT def set_rawobject(space, w_obj, address): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) if cppinstance: assert lltype.typeOf(cppinstance._rawobject) == capi.C_OBJECT cppinstance._rawobject = rffi.cast(capi.C_OBJECT, address) def get_rawobject_nonnull(space, w_obj): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) if cppinstance: cppinstance._nullcheck() @@ -56,7 +56,7 @@ except Exception: pass # None or nullptr - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return space.is_true(space.is_(w_obj, space.w_None)) or \ space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space))) @@ -104,18 +104,18 @@ "no converter available for '%s'", self.name) def cffi_type(self, space): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible def convert_argument(self, space, w_obj, address, call_local): self._is_abstract(space) def convert_argument_libffi(self, space, w_obj, address, call_local): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible def default_argument_libffi(self, space, address): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import 
FastCallNotPossible raise FastCallNotPossible def from_memory(self, space, w_obj, w_pycppclass, offset): @@ -362,7 +362,7 @@ return state.c_voidp def convert_argument_libffi(self, space, w_obj, address, call_local): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible class DoubleConverter(ffitypes.typeid(rffi.DOUBLE), FloatTypeConverterMixin, TypeConverter): @@ -442,7 +442,7 @@ address = self._get_raw_address(space, w_obj, offset) ptrval = rffi.cast(rffi.ULONG, rffi.cast(rffi.VOIDPP, address)[0]) if ptrval == 0: - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return interp_cppyy.get_nullptr(space) arr = space.interp_w(W_Array, letter2tp(space, 'P')) return arr.fromaddress(space, ptrval, sys.maxint) @@ -488,12 +488,12 @@ typecode = 'V' def __init__(self, space, cppclass): - from pypy.module.cppyy.interp_cppyy import W_CPPClass + from pypy.module._cppyy.interp_cppyy import W_CPPClass assert isinstance(cppclass, W_CPPClass) self.cppclass = cppclass def _unwrap_object(self, space, w_obj): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance if isinstance(w_obj, W_CPPInstance): if capi.c_is_subtype(space, w_obj.cppclass, self.cppclass): rawobject = w_obj.get_rawobject() @@ -521,12 +521,12 @@ class InstanceConverter(InstanceRefConverter): def convert_argument_libffi(self, space, w_obj, address, call_local): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible # TODO: by-value is a jit_libffi special case def from_memory(self, space, w_obj, w_pycppclass, offset): address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return 
interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False) def to_memory(self, space, w_obj, w_value, offset): @@ -547,7 +547,7 @@ def from_memory(self, space, w_obj, w_pycppclass, offset): address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False) def to_memory(self, space, w_obj, w_value, offset): @@ -570,30 +570,30 @@ def convert_argument_libffi(self, space, w_obj, address, call_local): # TODO: finalize_call not yet called for fast call (see interp_cppyy.py) - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible def finalize_call(self, space, w_obj, call_local): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance assert isinstance(w_obj, W_CPPInstance) r = rffi.cast(rffi.VOIDPP, call_local) w_obj._rawobject = rffi.cast(capi.C_OBJECT, r[0]) def from_memory(self, space, w_obj, w_pycppclass, offset): address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False, is_ref=True) class StdStringConverter(InstanceConverter): def __init__(self, space, extra): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) InstanceConverter.__init__(self, space, cppclass) def _unwrap_object(self, space, w_obj): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance if isinstance(w_obj, W_CPPInstance): arg = InstanceConverter._unwrap_object(self, space, w_obj) 
return capi.c_stdstring2stdstring(space, arg) @@ -604,7 +604,7 @@ try: address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) assign = self.cppclass.get_overload("__assign__") - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy assign.call( interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False), [w_value]) except Exception: @@ -619,7 +619,7 @@ typecode = 'V' def __init__(self, space, extra): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) InstancePtrConverter.__init__(self, space, cppclass) @@ -642,7 +642,7 @@ def convert_argument_libffi(self, space, w_obj, address, call_local): # TODO: free_argument not yet called for fast call (see interp_cppyy.py) - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible # proposed implementation: @@ -709,11 +709,11 @@ # 3) TODO: accept ref as pointer # 4) generalized cases (covers basically all user classes) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, clean_name) if cppclass: # type check for the benefit of the annotator - from pypy.module.cppyy.interp_cppyy import W_CPPClass + from pypy.module._cppyy.interp_cppyy import W_CPPClass cppclass = space.interp_w(W_CPPClass, cppclass, can_be_None=False) if compound == "*": return InstancePtrConverter(space, cppclass) @@ -874,12 +874,12 @@ class TStringConverter(InstanceConverter): def __init__(self, space, extra): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, "TString") InstanceConverter.__init__(self, space, cppclass) def _unwrap_object(self, space, w_obj): - from pypy.module.cppyy import interp_cppyy + from 
pypy.module._cppyy import interp_cppyy if isinstance(w_obj, interp_cppyy.W_CPPInstance): arg = InstanceConverter._unwrap_object(self, space, w_obj) return capi.backend.c_TString2TString(space, arg) diff --git a/pypy/module/cppyy/executor.py b/pypy/module/_cppyy/executor.py rename from pypy/module/cppyy/executor.py rename to pypy/module/_cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/_cppyy/executor.py @@ -8,7 +8,7 @@ from pypy.module._rawffi.interp_rawffi import unpack_simple_shape from pypy.module._rawffi.array import W_Array, W_ArrayInstance -from pypy.module.cppyy import helper, capi, ffitypes +from pypy.module._cppyy import helper, capi, ffitypes # Executor objects are used to dispatch C++ methods. They are defined by their # return type only: arguments are converted by Converter objects, and Executors @@ -31,7 +31,7 @@ pass def cffi_type(self, space): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible def execute(self, space, cppmethod, cppthis, num_args, args): @@ -39,7 +39,7 @@ "return type not available or supported") def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible @@ -58,7 +58,7 @@ ptrval = rffi.cast(rffi.ULONG, lresult) arr = space.interp_w(W_Array, unpack_simple_shape(space, space.newtext(self.typecode))) if ptrval == 0: - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return interp_cppyy.get_nullptr(space) return arr.fromaddress(space, ptrval, sys.maxint) @@ -138,7 +138,7 @@ class ConstructorExecutor(FunctionExecutor): def execute(self, space, cppmethod, cpptype, num_args, args): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy newthis = capi.c_constructor(space, cppmethod, 
cpptype, num_args, args) assert lltype.typeOf(newthis) == capi.C_OBJECT return space.newlong(rffi.cast(rffi.LONG, newthis)) # really want ptrdiff_t here @@ -156,7 +156,7 @@ return state.c_voidp def execute(self, space, cppmethod, cppthis, num_args, args): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy long_result = capi.c_call_l(space, cppmethod, cppthis, num_args, args) ptr_result = rffi.cast(capi.C_OBJECT, long_result) pyres = interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) @@ -165,34 +165,34 @@ def execute_libffi(self, space, cif_descr, funcaddr, buffer): jit_libffi.jit_ffi_call(cif_descr, funcaddr, buffer) result = rffi.ptradd(buffer, cif_descr.exchange_result) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy ptr_result = rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, result)[0]) return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) class InstancePtrPtrExecutor(InstancePtrExecutor): def execute(self, space, cppmethod, cppthis, num_args, args): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy voidp_result = capi.c_call_r(space, cppmethod, cppthis, num_args, args) ref_address = rffi.cast(rffi.VOIDPP, voidp_result) ptr_result = rffi.cast(capi.C_OBJECT, ref_address[0]) return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible class InstanceExecutor(InstancePtrExecutor): def execute(self, space, cppmethod, cppthis, num_args, args): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy long_result = capi.c_call_o(space, cppmethod, cppthis, num_args, args, self.cppclass) ptr_result = rffi.cast(capi.C_OBJECT, long_result) return 
interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass, do_cast=False, python_owns=True, fresh=True) def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible @@ -205,13 +205,13 @@ return space.newbytes(pystr) def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible class StdStringRefExecutor(InstancePtrExecutor): def __init__(self, space, cppclass): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) InstancePtrExecutor.__init__(self, space, cppclass) @@ -277,11 +277,11 @@ pass # 3) types/classes, either by ref/ptr or by value - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, clean_name) if cppclass: # type check for the benefit of the annotator - from pypy.module.cppyy.interp_cppyy import W_CPPClass + from pypy.module._cppyy.interp_cppyy import W_CPPClass cppclass = space.interp_w(W_CPPClass, cppclass, can_be_None=False) if compound == '': return InstanceExecutor(space, cppclass) diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/_cppyy/ffitypes.py rename from pypy/module/cppyy/ffitypes.py rename to pypy/module/_cppyy/ffitypes.py diff --git a/pypy/module/cppyy/genreflex-methptrgetter.patch b/pypy/module/_cppyy/genreflex-methptrgetter.patch rename from pypy/module/cppyy/genreflex-methptrgetter.patch rename to pypy/module/_cppyy/genreflex-methptrgetter.patch diff --git a/pypy/module/cppyy/helper.py b/pypy/module/_cppyy/helper.py rename from pypy/module/cppyy/helper.py rename to pypy/module/_cppyy/helper.py --- a/pypy/module/cppyy/helper.py +++ 
b/pypy/module/_cppyy/helper.py @@ -64,7 +64,7 @@ _operator_mappings = {} def map_operator_name(space, cppname, nargs, result_type): - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi if cppname[0:8] == "operator": op = cppname[8:].strip(' ') diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/_cppyy/include/capi.h rename from pypy/module/cppyy/include/capi.h rename to pypy/module/_cppyy/include/capi.h diff --git a/pypy/module/cppyy/include/clingcwrapper.h b/pypy/module/_cppyy/include/clingcwrapper.h rename from pypy/module/cppyy/include/clingcwrapper.h rename to pypy/module/_cppyy/include/clingcwrapper.h diff --git a/pypy/module/cppyy/include/cpp_cppyy.h b/pypy/module/_cppyy/include/cpp_cppyy.h rename from pypy/module/cppyy/include/cpp_cppyy.h rename to pypy/module/_cppyy/include/cpp_cppyy.h diff --git a/pypy/module/cppyy/include/cppyy.h b/pypy/module/_cppyy/include/cppyy.h rename from pypy/module/cppyy/include/cppyy.h rename to pypy/module/_cppyy/include/cppyy.h diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/_cppyy/interp_cppyy.py rename from pypy/module/cppyy/interp_cppyy.py rename to pypy/module/_cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/_cppyy/interp_cppyy.py @@ -1,4 +1,4 @@ -import pypy.module.cppyy.capi as capi +import pypy.module._cppyy.capi as capi from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -12,7 +12,7 @@ from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here from pypy.module._cffi_backend import ctypefunc -from pypy.module.cppyy import converter, executor, ffitypes, helper +from pypy.module._cppyy import converter, executor, ffitypes, helper class FastCallNotPossible(Exception): diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/_cppyy/pythonify.py rename from pypy/module/cppyy/pythonify.py rename to pypy/module/_cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py 
+++ b/pypy/module/_cppyy/pythonify.py @@ -1,5 +1,5 @@ # NOT_RPYTHON -# do not load cppyy here, see _init_pythonify() +# do not load _cppyy here, see _init_pythonify() import types import sys @@ -35,8 +35,8 @@ def _arg_to_str(self, arg): if arg == str: - import cppyy - arg = cppyy._std_string_name() + import _cppyy + arg = _cppyy._std_string_name() elif type(arg) != str: arg = arg.__name__ return arg @@ -99,8 +99,8 @@ else: d = dict() def cpp_proxy_loader(cls): - import cppyy - cpp_proxy = cppyy._scope_byname(cls.__name__ != '::' and cls.__name__ or '') + import _cppyy + cpp_proxy = _cppyy._scope_byname(cls.__name__ != '::' and cls.__name__ or '') del cls.__class__._cpp_proxy cls._cpp_proxy = cpp_proxy return cpp_proxy @@ -126,7 +126,7 @@ setattr(metans, dm_name, cppdm) modname = pycppns.__name__.replace('::', '.') - sys.modules['cppyy.gbl.'+modname] = pycppns + sys.modules['_cppyy.gbl.'+modname] = pycppns return pycppns def _drop_cycles(bases): @@ -141,8 +141,8 @@ def make_new(class_name): def __new__(cls, *args): # create a place-holder only as there may be a derived class defined - import cppyy - instance = cppyy.bind_object(0, class_name, True) + import _cppyy + instance = _cppyy.bind_object(0, class_name, True) if not instance.__class__ is cls: instance.__class__ = cls # happens for derived class return instance @@ -202,8 +202,8 @@ # the call to register will add back-end specific pythonizations and thus # needs to run first, so that the generic pythonizations can use them - import cppyy - cppyy._register_class(pycppclass) + import _cppyy + _cppyy._register_class(pycppclass) _pythonize(pycppclass) return pycppclass @@ -212,18 +212,18 @@ def get_pycppitem(scope, name): - import cppyy + import _cppyy # resolve typedefs/aliases full_name = (scope == gbl) and name or (scope.__name__+'::'+name) - true_name = cppyy._resolve_name(full_name) + true_name = _cppyy._resolve_name(full_name) if true_name != full_name: return get_pycppclass(true_name) pycppitem = None # 
classes - cppitem = cppyy._scope_byname(true_name) + cppitem = _cppyy._scope_byname(true_name) if cppitem: if cppitem.is_namespace(): pycppitem = make_cppnamespace(scope, true_name, cppitem) @@ -233,7 +233,7 @@ # templates if not cppitem: - cppitem = cppyy._template_byname(true_name) + cppitem = _cppyy._template_byname(true_name) if cppitem: pycppitem = make_cpptemplatetype(scope, name) setattr(scope, name, pycppitem) @@ -323,7 +323,7 @@ # general note: use 'in pyclass.__dict__' rather than 'hasattr' to prevent # adding pythonizations multiple times in derived classes - import cppyy + import _cppyy # map __eq__/__ne__ through a comparison to None if '__eq__' in pyclass.__dict__: @@ -362,8 +362,8 @@ # also the fallback on the indexed __getitem__, but that is slower) if not 'vector' in pyclass.__name__[:11] and \ ('begin' in pyclass.__dict__ and 'end' in pyclass.__dict__): - if cppyy._scope_byname(pyclass.__name__+'::iterator') or \ - cppyy._scope_byname(pyclass.__name__+'::const_iterator'): + if _cppyy._scope_byname(pyclass.__name__+'::iterator') or \ + _cppyy._scope_byname(pyclass.__name__+'::const_iterator'): def __iter__(self): i = self.begin() while i != self.end(): @@ -383,7 +383,7 @@ pyclass.__getitem__ = python_style_getitem # string comparisons - if pyclass.__name__ == cppyy._std_string_name(): + if pyclass.__name__ == _cppyy._std_string_name(): def eq(self, other): if type(other) == pyclass: return self.c_str() == other.c_str() @@ -410,29 +410,29 @@ try: return _loaded_dictionaries[name] except KeyError: - import cppyy - lib = cppyy._load_dictionary(name) + import _cppyy + lib = _cppyy._load_dictionary(name) _loaded_dictionaries[name] = lib return lib def _init_pythonify(): - # cppyy should not be loaded at the module level, as that will trigger a - # call to space.getbuiltinmodule(), which will cause cppyy to be loaded - # at pypy-c startup, rather than on the "import cppyy" statement - import cppyy + # _cppyy should not be loaded at the module level, as 
that will trigger a + # call to space.getbuiltinmodule(), which will cause _cppyy to be loaded + # at pypy-c startup, rather than on the "import _cppyy" statement + import _cppyy # root of all proxy classes: CPPInstance in pythonify exists to combine the # CPPClass meta class with the interp-level CPPInstanceBase global CPPInstance - class CPPInstance(cppyy.CPPInstanceBase): + class CPPInstance(_cppyy.CPPInstanceBase): __metaclass__ = CPPClass pass # class generator callback - cppyy._set_class_generator(clgen_callback) + _cppyy._set_class_generator(clgen_callback) # function generator callback - cppyy._set_function_generator(fngen_callback) + _cppyy._set_function_generator(fngen_callback) # user interface objects (note the two-step of not calling scope_byname here: # creation of global functions may cause the creation of classes in the global @@ -450,14 +450,14 @@ setattr(gbl, 'internal_enum_type_t', int) # install nullptr as a unique reference - setattr(gbl, 'nullptr', cppyy._get_nullptr()) + setattr(gbl, 'nullptr', _cppyy._get_nullptr()) # install for user access - cppyy.gbl = gbl + _cppyy.gbl = gbl # install as modules to allow importing from - sys.modules['cppyy.gbl'] = gbl - sys.modules['cppyy.gbl.std'] = gbl.std + sys.modules['_cppyy.gbl'] = gbl + sys.modules['_cppyy.gbl.std'] = gbl.std # user-defined pythonizations interface _pythonizations = {} diff --git a/pypy/module/cppyy/src/callcontext.h b/pypy/module/_cppyy/src/callcontext.h rename from pypy/module/cppyy/src/callcontext.h rename to pypy/module/_cppyy/src/callcontext.h diff --git a/pypy/module/cppyy/src/clingcwrapper.cxx b/pypy/module/_cppyy/src/clingcwrapper.cxx rename from pypy/module/cppyy/src/clingcwrapper.cxx rename to pypy/module/_cppyy/src/clingcwrapper.cxx diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/_cppyy/src/dummy_backend.cxx rename from pypy/module/cppyy/src/dummy_backend.cxx rename to pypy/module/_cppyy/src/dummy_backend.cxx diff --git 
a/pypy/module/cppyy/test/Makefile b/pypy/module/_cppyy/test/Makefile rename from pypy/module/cppyy/test/Makefile rename to pypy/module/_cppyy/test/Makefile diff --git a/pypy/module/cppyy/test/__init__.py b/pypy/module/_cppyy/test/__init__.py rename from pypy/module/cppyy/test/__init__.py rename to pypy/module/_cppyy/test/__init__.py diff --git a/pypy/module/cppyy/test/advancedcpp.cxx b/pypy/module/_cppyy/test/advancedcpp.cxx rename from pypy/module/cppyy/test/advancedcpp.cxx rename to pypy/module/_cppyy/test/advancedcpp.cxx diff --git a/pypy/module/cppyy/test/advancedcpp.h b/pypy/module/_cppyy/test/advancedcpp.h rename from pypy/module/cppyy/test/advancedcpp.h rename to pypy/module/_cppyy/test/advancedcpp.h diff --git a/pypy/module/cppyy/test/advancedcpp.xml b/pypy/module/_cppyy/test/advancedcpp.xml rename from pypy/module/cppyy/test/advancedcpp.xml rename to pypy/module/_cppyy/test/advancedcpp.xml diff --git a/pypy/module/cppyy/test/advancedcpp2.cxx b/pypy/module/_cppyy/test/advancedcpp2.cxx rename from pypy/module/cppyy/test/advancedcpp2.cxx rename to pypy/module/_cppyy/test/advancedcpp2.cxx diff --git a/pypy/module/cppyy/test/advancedcpp2.h b/pypy/module/_cppyy/test/advancedcpp2.h rename from pypy/module/cppyy/test/advancedcpp2.h rename to pypy/module/_cppyy/test/advancedcpp2.h diff --git a/pypy/module/cppyy/test/advancedcpp2.xml b/pypy/module/_cppyy/test/advancedcpp2.xml rename from pypy/module/cppyy/test/advancedcpp2.xml rename to pypy/module/_cppyy/test/advancedcpp2.xml diff --git a/pypy/module/cppyy/test/advancedcpp2_LinkDef.h b/pypy/module/_cppyy/test/advancedcpp2_LinkDef.h rename from pypy/module/cppyy/test/advancedcpp2_LinkDef.h rename to pypy/module/_cppyy/test/advancedcpp2_LinkDef.h diff --git a/pypy/module/cppyy/test/advancedcpp_LinkDef.h b/pypy/module/_cppyy/test/advancedcpp_LinkDef.h rename from pypy/module/cppyy/test/advancedcpp_LinkDef.h rename to pypy/module/_cppyy/test/advancedcpp_LinkDef.h diff --git a/pypy/module/cppyy/test/bench1.cxx 
b/pypy/module/_cppyy/test/bench1.cxx rename from pypy/module/cppyy/test/bench1.cxx rename to pypy/module/_cppyy/test/bench1.cxx diff --git a/pypy/module/cppyy/test/bench1.py b/pypy/module/_cppyy/test/bench1.py rename from pypy/module/cppyy/test/bench1.py rename to pypy/module/_cppyy/test/bench1.py diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/_cppyy/test/conftest.py rename from pypy/module/cppyy/test/conftest.py rename to pypy/module/_cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/_cppyy/test/conftest.py @@ -3,7 +3,7 @@ @py.test.mark.tryfirst def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: - import pypy.module.cppyy.capi.loadable_capi as lcapi + import pypy.module._cppyy.capi.loadable_capi as lcapi if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex @@ -30,7 +30,7 @@ def pytest_configure(config): if py.path.local.sysfind('genreflex') is None: - import pypy.module.cppyy.capi.loadable_capi as lcapi + import pypy.module._cppyy.capi.loadable_capi as lcapi try: import ctypes ctypes.CDLL(lcapi.reflection_library) diff --git a/pypy/module/cppyy/test/crossing.cxx b/pypy/module/_cppyy/test/crossing.cxx rename from pypy/module/cppyy/test/crossing.cxx rename to pypy/module/_cppyy/test/crossing.cxx diff --git a/pypy/module/cppyy/test/crossing.h b/pypy/module/_cppyy/test/crossing.h rename from pypy/module/cppyy/test/crossing.h rename to pypy/module/_cppyy/test/crossing.h diff --git a/pypy/module/cppyy/test/crossing.xml b/pypy/module/_cppyy/test/crossing.xml rename from pypy/module/cppyy/test/crossing.xml rename to pypy/module/_cppyy/test/crossing.xml diff --git a/pypy/module/cppyy/test/crossing_LinkDef.h b/pypy/module/_cppyy/test/crossing_LinkDef.h rename from pypy/module/cppyy/test/crossing_LinkDef.h rename to pypy/module/_cppyy/test/crossing_LinkDef.h diff --git a/pypy/module/cppyy/test/datatypes.cxx 
b/pypy/module/_cppyy/test/datatypes.cxx rename from pypy/module/cppyy/test/datatypes.cxx rename to pypy/module/_cppyy/test/datatypes.cxx diff --git a/pypy/module/cppyy/test/datatypes.h b/pypy/module/_cppyy/test/datatypes.h rename from pypy/module/cppyy/test/datatypes.h rename to pypy/module/_cppyy/test/datatypes.h diff --git a/pypy/module/cppyy/test/datatypes.xml b/pypy/module/_cppyy/test/datatypes.xml rename from pypy/module/cppyy/test/datatypes.xml rename to pypy/module/_cppyy/test/datatypes.xml diff --git a/pypy/module/cppyy/test/datatypes_LinkDef.h b/pypy/module/_cppyy/test/datatypes_LinkDef.h rename from pypy/module/cppyy/test/datatypes_LinkDef.h rename to pypy/module/_cppyy/test/datatypes_LinkDef.h diff --git a/pypy/module/cppyy/test/example01.cxx b/pypy/module/_cppyy/test/example01.cxx rename from pypy/module/cppyy/test/example01.cxx rename to pypy/module/_cppyy/test/example01.cxx diff --git a/pypy/module/cppyy/test/example01.h b/pypy/module/_cppyy/test/example01.h rename from pypy/module/cppyy/test/example01.h rename to pypy/module/_cppyy/test/example01.h diff --git a/pypy/module/cppyy/test/example01.xml b/pypy/module/_cppyy/test/example01.xml rename from pypy/module/cppyy/test/example01.xml rename to pypy/module/_cppyy/test/example01.xml diff --git a/pypy/module/cppyy/test/example01_LinkDef.h b/pypy/module/_cppyy/test/example01_LinkDef.h rename from pypy/module/cppyy/test/example01_LinkDef.h rename to pypy/module/_cppyy/test/example01_LinkDef.h diff --git a/pypy/module/cppyy/test/fragile.cxx b/pypy/module/_cppyy/test/fragile.cxx rename from pypy/module/cppyy/test/fragile.cxx rename to pypy/module/_cppyy/test/fragile.cxx diff --git a/pypy/module/cppyy/test/fragile.h b/pypy/module/_cppyy/test/fragile.h rename from pypy/module/cppyy/test/fragile.h rename to pypy/module/_cppyy/test/fragile.h diff --git a/pypy/module/cppyy/test/fragile.xml b/pypy/module/_cppyy/test/fragile.xml rename from pypy/module/cppyy/test/fragile.xml rename to 
pypy/module/_cppyy/test/fragile.xml diff --git a/pypy/module/cppyy/test/fragile_LinkDef.h b/pypy/module/_cppyy/test/fragile_LinkDef.h rename from pypy/module/cppyy/test/fragile_LinkDef.h rename to pypy/module/_cppyy/test/fragile_LinkDef.h diff --git a/pypy/module/cppyy/test/iotypes.cxx b/pypy/module/_cppyy/test/iotypes.cxx rename from pypy/module/cppyy/test/iotypes.cxx rename to pypy/module/_cppyy/test/iotypes.cxx diff --git a/pypy/module/cppyy/test/iotypes.h b/pypy/module/_cppyy/test/iotypes.h rename from pypy/module/cppyy/test/iotypes.h rename to pypy/module/_cppyy/test/iotypes.h diff --git a/pypy/module/cppyy/test/iotypes.xml b/pypy/module/_cppyy/test/iotypes.xml rename from pypy/module/cppyy/test/iotypes.xml rename to pypy/module/_cppyy/test/iotypes.xml diff --git a/pypy/module/cppyy/test/iotypes_LinkDef.h b/pypy/module/_cppyy/test/iotypes_LinkDef.h rename from pypy/module/cppyy/test/iotypes_LinkDef.h rename to pypy/module/_cppyy/test/iotypes_LinkDef.h diff --git a/pypy/module/cppyy/test/operators.cxx b/pypy/module/_cppyy/test/operators.cxx rename from pypy/module/cppyy/test/operators.cxx rename to pypy/module/_cppyy/test/operators.cxx diff --git a/pypy/module/cppyy/test/operators.h b/pypy/module/_cppyy/test/operators.h rename from pypy/module/cppyy/test/operators.h rename to pypy/module/_cppyy/test/operators.h diff --git a/pypy/module/cppyy/test/operators.xml b/pypy/module/_cppyy/test/operators.xml rename from pypy/module/cppyy/test/operators.xml rename to pypy/module/_cppyy/test/operators.xml diff --git a/pypy/module/cppyy/test/operators_LinkDef.h b/pypy/module/_cppyy/test/operators_LinkDef.h rename from pypy/module/cppyy/test/operators_LinkDef.h rename to pypy/module/_cppyy/test/operators_LinkDef.h diff --git a/pypy/module/cppyy/test/overloads.cxx b/pypy/module/_cppyy/test/overloads.cxx rename from pypy/module/cppyy/test/overloads.cxx rename to pypy/module/_cppyy/test/overloads.cxx diff --git a/pypy/module/cppyy/test/overloads.h 
b/pypy/module/_cppyy/test/overloads.h rename from pypy/module/cppyy/test/overloads.h rename to pypy/module/_cppyy/test/overloads.h diff --git a/pypy/module/cppyy/test/overloads.xml b/pypy/module/_cppyy/test/overloads.xml rename from pypy/module/cppyy/test/overloads.xml rename to pypy/module/_cppyy/test/overloads.xml diff --git a/pypy/module/cppyy/test/overloads_LinkDef.h b/pypy/module/_cppyy/test/overloads_LinkDef.h rename from pypy/module/cppyy/test/overloads_LinkDef.h rename to pypy/module/_cppyy/test/overloads_LinkDef.h diff --git a/pypy/module/cppyy/test/simple_class.C b/pypy/module/_cppyy/test/simple_class.C rename from pypy/module/cppyy/test/simple_class.C rename to pypy/module/_cppyy/test/simple_class.C diff --git a/pypy/module/cppyy/test/std_streams.cxx b/pypy/module/_cppyy/test/std_streams.cxx rename from pypy/module/cppyy/test/std_streams.cxx rename to pypy/module/_cppyy/test/std_streams.cxx diff --git a/pypy/module/cppyy/test/std_streams.h b/pypy/module/_cppyy/test/std_streams.h rename from pypy/module/cppyy/test/std_streams.h rename to pypy/module/_cppyy/test/std_streams.h diff --git a/pypy/module/cppyy/test/std_streams.xml b/pypy/module/_cppyy/test/std_streams.xml rename from pypy/module/cppyy/test/std_streams.xml rename to pypy/module/_cppyy/test/std_streams.xml diff --git a/pypy/module/cppyy/test/std_streams_LinkDef.h b/pypy/module/_cppyy/test/std_streams_LinkDef.h rename from pypy/module/cppyy/test/std_streams_LinkDef.h rename to pypy/module/_cppyy/test/std_streams_LinkDef.h diff --git a/pypy/module/cppyy/test/stltypes.cxx b/pypy/module/_cppyy/test/stltypes.cxx rename from pypy/module/cppyy/test/stltypes.cxx rename to pypy/module/_cppyy/test/stltypes.cxx diff --git a/pypy/module/cppyy/test/stltypes.h b/pypy/module/_cppyy/test/stltypes.h rename from pypy/module/cppyy/test/stltypes.h rename to pypy/module/_cppyy/test/stltypes.h diff --git a/pypy/module/cppyy/test/stltypes.xml b/pypy/module/_cppyy/test/stltypes.xml rename from 
pypy/module/cppyy/test/stltypes.xml rename to pypy/module/_cppyy/test/stltypes.xml diff --git a/pypy/module/cppyy/test/stltypes_LinkDef.h b/pypy/module/_cppyy/test/stltypes_LinkDef.h rename from pypy/module/cppyy/test/stltypes_LinkDef.h rename to pypy/module/_cppyy/test/stltypes_LinkDef.h diff --git a/pypy/module/cppyy/test/support.py b/pypy/module/_cppyy/test/support.py rename from pypy/module/cppyy/test/support.py rename to pypy/module/_cppyy/test/support.py --- a/pypy/module/cppyy/test/support.py +++ b/pypy/module/_cppyy/test/support.py @@ -6,7 +6,7 @@ def setup_make(targetname): if sys.platform == 'win32': py.test.skip("win32 not supported so far") - import pypy.module.cppyy.capi.loadable_capi as lcapi + import pypy.module._cppyy.capi.loadable_capi as lcapi popen = subprocess.Popen(["make", targetname], cwd=str(currpath), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, _ = popen.communicate() diff --git a/pypy/module/cppyy/test/test_aclassloader.py b/pypy/module/_cppyy/test/test_aclassloader.py rename from pypy/module/cppyy/test/test_aclassloader.py rename to pypy/module/_cppyy/test/test_aclassloader.py --- a/pypy/module/cppyy/test/test_aclassloader.py +++ b/pypy/module/_cppyy/test/test_aclassloader.py @@ -12,17 +12,17 @@ class AppTestACLASSLOADER: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools']) def setup_class(cls): cls.space.appexec([], """(): - import cppyy""") + import _cppyy""") def test01_class_autoloading(self): """Test whether a class can be found through .rootmap.""" - import cppyy - example01_class = cppyy.gbl.example01 + import _cppyy + example01_class = _cppyy.gbl.example01 assert example01_class - cl2 = cppyy.gbl.example01 + cl2 = _cppyy.gbl.example01 assert cl2 assert example01_class is cl2 diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/_cppyy/test/test_advancedcpp.py rename from pypy/module/cppyy/test/test_advancedcpp.py rename 
to pypy/module/_cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/_cppyy/test/test_advancedcpp.py @@ -1,6 +1,6 @@ import py, os, sys -from pypy.module.cppyy import capi +from pypy.module._cppyy import capi currpath = py.path.local(__file__).dirpath() @@ -15,21 +15,21 @@ raise OSError("'make' failed (see stderr)") class AppTestADVANCEDCPP: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.newtext(test_dct) cls.w_capi_identity = cls.space.newtext(capi.identify()) cls.w_advanced = cls.space.appexec([], """(): - import cppyy - return cppyy.load_reflection_info(%r)""" % (test_dct, )) + import _cppyy + return _cppyy.load_reflection_info(%r)""" % (test_dct, )) def test01_default_arguments(self): """Test usage of default arguments""" - import cppyy + import _cppyy def test_defaulter(n, t): - defaulter = getattr(cppyy.gbl, '%s_defaulter' % n) + defaulter = getattr(_cppyy.gbl, '%s_defaulter' % n) d = defaulter() assert d.m_a == t(11) @@ -68,9 +68,9 @@ def test02_simple_inheritance(self): """Test binding of a basic inheritance structure""" - import cppyy - base_class = cppyy.gbl.base_class - derived_class = cppyy.gbl.derived_class + import _cppyy + base_class = _cppyy.gbl.base_class + derived_class = _cppyy.gbl.derived_class assert issubclass(derived_class, base_class) assert not issubclass(base_class, derived_class) @@ -122,8 +122,8 @@ def test03_namespaces(self): """Test access to namespaces and inner classes""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl assert gbl.a_ns is gbl.a_ns assert gbl.a_ns.d_ns is gbl.a_ns.d_ns @@ -149,10 +149,10 @@ def test03a_namespace_lookup_on_update(self): """Test whether namespaces can be shared across dictionaries.""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl - lib2 = 
cppyy.load_reflection_info("advancedcpp2Dict.so") + lib2 = _cppyy.load_reflection_info("advancedcpp2Dict.so") assert gbl.a_ns is gbl.a_ns assert gbl.a_ns.d_ns is gbl.a_ns.d_ns @@ -178,8 +178,8 @@ def test04_template_types(self): """Test bindings of templated types""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl assert gbl.T1 is gbl.T1 assert gbl.T2 is gbl.T2 @@ -244,8 +244,8 @@ def test05_abstract_classes(self): """Test non-instatiatability of abstract classes""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl raises(TypeError, gbl.a_class) raises(TypeError, gbl.some_abstract_class) @@ -259,12 +259,12 @@ def test06_datamembers(self): """Test data member access when using virtual inheritence""" - import cppyy - a_class = cppyy.gbl.a_class - b_class = cppyy.gbl.b_class - c_class_1 = cppyy.gbl.c_class_1 - c_class_2 = cppyy.gbl.c_class_2 - d_class = cppyy.gbl.d_class + import _cppyy + a_class = _cppyy.gbl.a_class + b_class = _cppyy.gbl.b_class + c_class_1 = _cppyy.gbl.c_class_1 + c_class_2 = _cppyy.gbl.c_class_2 + d_class = _cppyy.gbl.d_class assert issubclass(b_class, a_class) assert issubclass(c_class_1, a_class) @@ -353,8 +353,8 @@ def test07_pass_by_reference(self): """Test reference passing when using virtual inheritance""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl b_class = gbl.b_class c_class = gbl.c_class_2 d_class = gbl.d_class @@ -386,71 +386,71 @@ def test08_void_pointer_passing(self): """Test passing of variants of void pointer arguments""" - import cppyy - pointer_pass = cppyy.gbl.pointer_pass - some_concrete_class = cppyy.gbl.some_concrete_class + import _cppyy + pointer_pass = _cppyy.gbl.pointer_pass + some_concrete_class = _cppyy.gbl.some_concrete_class pp = pointer_pass() o = some_concrete_class() - assert cppyy.addressof(o) == pp.gime_address_ptr(o) - assert cppyy.addressof(o) == pp.gime_address_ptr_ptr(o) - assert cppyy.addressof(o) == pp.gime_address_ptr_ref(o) + assert 
_cppyy.addressof(o) == pp.gime_address_ptr(o) + assert _cppyy.addressof(o) == pp.gime_address_ptr_ptr(o) + assert _cppyy.addressof(o) == pp.gime_address_ptr_ref(o) import array - addressofo = array.array('l', [cppyy.addressof(o)]) + addressofo = array.array('l', [_cppyy.addressof(o)]) assert addressofo.buffer_info()[0] == pp.gime_address_ptr_ptr(addressofo) assert 0 == pp.gime_address_ptr(0) assert 0 == pp.gime_address_ptr(None) - ptr = cppyy.bind_object(0, some_concrete_class) - assert cppyy.addressof(ptr) == 0 + ptr = _cppyy.bind_object(0, some_concrete_class) + assert _cppyy.addressof(ptr) == 0 pp.set_address_ptr_ref(ptr) - assert cppyy.addressof(ptr) == 0x1234 + assert _cppyy.addressof(ptr) == 0x1234 pp.set_address_ptr_ptr(ptr) - assert cppyy.addressof(ptr) == 0x4321 + assert _cppyy.addressof(ptr) == 0x4321 def test09_opaque_pointer_passing(self): """Test passing around of opaque pointers""" - import cppyy - some_concrete_class = cppyy.gbl.some_concrete_class + import _cppyy + some_concrete_class = _cppyy.gbl.some_concrete_class o = some_concrete_class() # TODO: figure out the PyPy equivalent of CObject (may have to do this # through the C-API from C++) - #cobj = cppyy.as_cobject(o) - addr = cppyy.addressof(o) + #cobj = _cppyy.as_cobject(o) + addr = _cppyy.addressof(o) - #assert o == cppyy.bind_object(cobj, some_concrete_class) - #assert o == cppyy.bind_object(cobj, type(o)) - #assert o == cppyy.bind_object(cobj, o.__class__) - #assert o == cppyy.bind_object(cobj, "some_concrete_class") - assert cppyy.addressof(o) == cppyy.addressof(cppyy.bind_object(addr, some_concrete_class)) - assert o == cppyy.bind_object(addr, some_concrete_class) - assert o == cppyy.bind_object(addr, type(o)) - assert o == cppyy.bind_object(addr, o.__class__) - assert o == cppyy.bind_object(addr, "some_concrete_class") - raises(TypeError, cppyy.bind_object, addr, "does_not_exist") - raises(TypeError, cppyy.bind_object, addr, 1) + #assert o == _cppyy.bind_object(cobj, some_concrete_class) 
+ #assert o == _cppyy.bind_object(cobj, type(o)) + #assert o == _cppyy.bind_object(cobj, o.__class__) + #assert o == _cppyy.bind_object(cobj, "some_concrete_class") + assert _cppyy.addressof(o) == _cppyy.addressof(_cppyy.bind_object(addr, some_concrete_class)) + assert o == _cppyy.bind_object(addr, some_concrete_class) + assert o == _cppyy.bind_object(addr, type(o)) + assert o == _cppyy.bind_object(addr, o.__class__) + assert o == _cppyy.bind_object(addr, "some_concrete_class") + raises(TypeError, _cppyy.bind_object, addr, "does_not_exist") + raises(TypeError, _cppyy.bind_object, addr, 1) def test10_object_identity(self): """Test object identity""" - import cppyy - some_concrete_class = cppyy.gbl.some_concrete_class - some_class_with_data = cppyy.gbl.some_class_with_data + import _cppyy + some_concrete_class = _cppyy.gbl.some_concrete_class + some_class_with_data = _cppyy.gbl.some_class_with_data o = some_concrete_class() - addr = cppyy.addressof(o) + addr = _cppyy.addressof(o) - o2 = cppyy.bind_object(addr, some_concrete_class) + o2 = _cppyy.bind_object(addr, some_concrete_class) assert o is o2 - o3 = cppyy.bind_object(addr, some_class_with_data) + o3 = _cppyy.bind_object(addr, some_class_with_data) assert not o is o3 d1 = some_class_with_data() @@ -471,11 +471,11 @@ def test11_multi_methods(self): """Test calling of methods from multiple inheritance""" - import cppyy - multi = cppyy.gbl.multi + import _cppyy + multi = _cppyy.gbl.multi - assert cppyy.gbl.multi1 is multi.__bases__[0] - assert cppyy.gbl.multi2 is multi.__bases__[1] + assert _cppyy.gbl.multi1 is multi.__bases__[0] + assert _cppyy.gbl.multi2 is multi.__bases__[1] dict_keys = multi.__dict__.keys() assert dict_keys.count('get_my_own_int') == 1 @@ -490,9 +490,9 @@ def test12_actual_type(self): """Test that a pointer to base return does an auto-downcast""" - import cppyy - base_class = cppyy.gbl.base_class - derived_class = cppyy.gbl.derived_class + import _cppyy + base_class = _cppyy.gbl.base_class + 
derived_class = _cppyy.gbl.derived_class b = base_class() d = derived_class() @@ -519,30 +519,30 @@ assert not isinstance(voidp, base_class) assert not isinstance(voidp, derived_class) - d1 = cppyy.bind_object(voidp, base_class, cast=True) + d1 = _cppyy.bind_object(voidp, base_class, cast=True) assert isinstance(d1, derived_class) assert d1 is d - b1 = cppyy.bind_object(voidp, base_class) + b1 = _cppyy.bind_object(voidp, base_class) assert isinstance(b1, base_class) - assert cppyy.addressof(b1) == cppyy.addressof(d) + assert _cppyy.addressof(b1) == _cppyy.addressof(d) assert not (b1 is d) def test13_actual_type_virtual_multi(self): """Test auto-downcast in adverse inheritance situation""" - import cppyy + import _cppyy - c1 = cppyy.gbl.create_c1() - assert type(c1) == cppyy.gbl.c_class_1 + c1 = _cppyy.gbl.create_c1() + assert type(c1) == _cppyy.gbl.c_class_1 assert c1.m_c == 3 c1.destruct() if self.capi_identity == 'CINT': # CINT does not support dynamic casts return - c2 = cppyy.gbl.create_c2() - assert type(c2) == cppyy.gbl.c_class_2 + c2 = _cppyy.gbl.create_c2() + assert type(c2) == _cppyy.gbl.c_class_2 assert c2.m_c == 3 c2.destruct() @@ -558,11 +558,11 @@ if self.capi_identity != 'CINT': # don't test anything for Reflex return - import cppyy + import _cppyy - assert cppyy.gbl.new_overloader.s_instances == 0 - nl = cppyy.gbl.new_overloader() - assert cppyy.gbl.new_overloader.s_instances == 1 + assert _cppyy.gbl.new_overloader.s_instances == 0 + nl = _cppyy.gbl.new_overloader() + assert _cppyy.gbl.new_overloader.s_instances == 1 nl.destruct() if self.capi_identity == 'CINT': # do not test delete @@ -570,16 +570,16 @@ import gc gc.collect() - assert cppyy.gbl.new_overloader.s_instances == 0 + assert _cppyy.gbl.new_overloader.s_instances == 0 def test15_template_instantiation_with_vector_of_float(self): """Test template instantiation with a std::vector""" - import cppyy + import _cppyy # the following will simply fail if there is a naming problem (e.g. 
# std::, allocator, etc., etc.); note the parsing required ... - b = cppyy.gbl.my_templated_class(cppyy.gbl.std.vector(float))() + b = _cppyy.gbl.my_templated_class(_cppyy.gbl.std.vector(float))() for i in range(5): b.m_b.push_back(i) @@ -588,9 +588,9 @@ def test16_template_member_functions(self): """Test template member functions lookup and calls""" - import cppyy + import _cppyy - m = cppyy.gbl.my_templated_method_class() + m = _cppyy.gbl.my_templated_method_class() assert m.get_size('char')() == m.get_char_size() assert m.get_size(int)() == m.get_int_size() @@ -603,9 +603,9 @@ def test17_template_global_functions(self): """Test template global function lookup and calls""" - import cppyy + import _cppyy - f = cppyy.gbl.my_templated_function + f = _cppyy.gbl.my_templated_function assert f('c') == 'c' assert type(f('c')) == type('c') @@ -615,7 +615,7 @@ def test18_assign_to_return_byref( self ): """Test assignment to an instance returned by reference""" - from cppyy import gbl + from _cppyy import gbl a = gbl.std.vector(gbl.ref_tester)() a.push_back(gbl.ref_tester(42)) @@ -631,7 +631,7 @@ def test19_math_converters(self): """Test operator int/long/double incl. 
typedef""" - from cppyy import gbl + from _cppyy import gbl a = gbl.some_convertible() a.m_i = 1234 @@ -647,7 +647,7 @@ def test20_comparator(self): """Check that the global operator!=/== is picked up""" - from cppyy import gbl + from _cppyy import gbl a, b = gbl.some_comparable(), gbl.some_comparable() @@ -665,18 +665,18 @@ def test21_overload_order_with_proper_return(self): """Test return type against proper overload w/ const and covariance""" - import cppyy + import _cppyy - assert cppyy.gbl.overload_one_way().gime() == 1 - assert cppyy.gbl.overload_the_other_way().gime() == "aap" + assert _cppyy.gbl.overload_one_way().gime() == 1 + assert _cppyy.gbl.overload_the_other_way().gime() == "aap" def test22_access_to_global_variables(self): """Access global_variables_and_pointers""" - import cppyy + import _cppyy - assert cppyy.gbl.my_global_double == 12. - assert len(cppyy.gbl.my_global_array) == 500 + assert _cppyy.gbl.my_global_double == 12. + assert len(_cppyy.gbl.my_global_array) == 500 # TODO: currently fails b/c double** not understood as &double* - #assert cppyy.gbl.my_global_ptr[0] == 1234. + #assert _cppyy.gbl.my_global_ptr[0] == 1234. 
diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/_cppyy/test/test_cppyy.py rename from pypy/module/cppyy/test/test_cppyy.py rename to pypy/module/_cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/_cppyy/test/test_cppyy.py @@ -1,7 +1,7 @@ import py, os, sys import subprocess -from pypy.module.cppyy import interp_cppyy, executor +from pypy.module._cppyy import interp_cppyy, executor from .support import setup_make @@ -27,13 +27,13 @@ class AppTestCPPYY: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools']) def setup_class(cls): cls.w_example01, cls.w_payload = cls.space.unpackiterable(cls.space.appexec([], """(): - import cppyy - cppyy.load_reflection_info(%r) - return cppyy._scope_byname('example01'), cppyy._scope_byname('payload')""" % (test_dct, ))) + import _cppyy + _cppyy.load_reflection_info(%r) + return _cppyy._scope_byname('example01'), _cppyy._scope_byname('payload')""" % (test_dct, ))) def test01_static_int(self): """Test passing of an int, returning of an int, and overloading on a @@ -86,7 +86,7 @@ def test04_method_int(self): """Test passing of a int, returning of a int, and memory cleanup, on a method.""" - import cppyy + import _cppyy t = self.example01 @@ -119,7 +119,7 @@ """Test memory destruction and integrity.""" import gc - import cppyy + import _cppyy t = self.example01 @@ -150,7 +150,7 @@ def test05a_memory2(self): """Test ownership control.""" - import gc, cppyy + import gc, _cppyy t = self.example01 @@ -171,7 +171,7 @@ def test06_method_double(self): """Test passing of a double and returning of double on a method.""" - import cppyy + import _cppyy t = self.example01 @@ -189,7 +189,7 @@ def test07_method_constcharp(self): """Test passing of a C string and returning of a C string on a method.""" - import cppyy + import _cppyy t = self.example01 @@ -205,7 +205,7 @@ def test08_pass_object_by_pointer(self): """Test 
passing of an instance as an argument.""" - import cppyy + import _cppyy t1 = self.example01 t2 = self.payload @@ -225,7 +225,7 @@ def test09_return_object_by_pointer(self): """Test returning of an instance as an argument.""" - import cppyy + import _cppyy t1 = self.example01 t2 = self.payload diff --git a/pypy/module/cppyy/test/test_crossing.py b/pypy/module/_cppyy/test/test_crossing.py rename from pypy/module/cppyy/test/test_crossing.py rename to pypy/module/_cppyy/test/test_crossing.py --- a/pypy/module/cppyy/test/test_crossing.py +++ b/pypy/module/_cppyy/test/test_crossing.py @@ -68,14 +68,14 @@ return str(pydname) class AppTestCrossing: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools']) def setup_class(cls): - # cppyy specific additions (note that test_dct is loaded late + # _cppyy specific additions (note that test_dct is loaded late # to allow the generated extension module be loaded first) cls.w_test_dct = cls.space.newtext(test_dct) cls.w_pre_imports = cls.space.appexec([], """(): - import ctypes, cppyy""") # prevents leak-checking complaints on ctypes' statics + import ctypes, _cppyy""") # prevents leak-checking complaints on ctypes' statics def setup_method(self, func): @unwrap_spec(name='text', init='text', body='text') @@ -145,11 +145,11 @@ def test02_crossing_dict(self): """Test availability of all needed classes in the dict""" - import cppyy - cppyy.load_reflection_info(self.test_dct) + import _cppyy + _cppyy.load_reflection_info(self.test_dct) - assert cppyy.gbl.crossing == cppyy.gbl.crossing - crossing = cppyy.gbl.crossing + assert _cppyy.gbl.crossing == _cppyy.gbl.crossing + crossing = _cppyy.gbl.crossing assert crossing.A == crossing.A @@ -157,8 +157,8 @@ def test03_send_pyobject(self): """Test sending a true pyobject to C++""" - import cppyy - crossing = cppyy.gbl.crossing + import _cppyy + crossing = _cppyy.gbl.crossing a = crossing.A() assert a.unwrap(13) == 13 
@@ -167,8 +167,8 @@ def test04_send_and_receive_pyobject(self): """Test receiving a true pyobject from C++""" - import cppyy - crossing = cppyy.gbl.crossing + import _cppyy + crossing = _cppyy.gbl.crossing a = crossing.A() diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/_cppyy/test/test_datatypes.py rename from pypy/module/cppyy/test/test_datatypes.py rename to pypy/module/_cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/_cppyy/test/test_datatypes.py @@ -9,26 +9,26 @@ setup_make("datatypesDict.so") class AppTestDATATYPES: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.newtext(test_dct) cls.w_datatypes = cls.space.appexec([], """(): - import cppyy - return cppyy.load_reflection_info(%r)""" % (test_dct, )) + import _cppyy + return _cppyy.load_reflection_info(%r)""" % (test_dct, )) cls.w_N = cls.space.newint(5) # should be imported from the dictionary def test01_load_reflection_cache(self): """Loading reflection info twice should result in the same object""" - import cppyy - lib2 = cppyy.load_reflection_info(self.test_dct) + import _cppyy + lib2 = _cppyy.load_reflection_info(self.test_dct) assert self.datatypes is lib2 def test02_instance_data_read_access(self): """Read access to instance public data and verify values""" - import cppyy - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert isinstance(c, CppyyTestData) @@ -115,8 +115,8 @@ def test03_instance_data_write_access(self): """Test write access to instance public data and verify values""" - import cppyy - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert isinstance(c, CppyyTestData) @@ -202,8 +202,8 @@ def test04_array_passing(self): """Test passing of array 
arguments""" - import cppyy, array, sys - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy, array, sys + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert isinstance(c, CppyyTestData) @@ -232,16 +232,16 @@ raises(Exception, c.pass_array(0).__getitem__, 0) # raises SegfaultException assert not c.pass_array(None) raises(Exception, c.pass_array(None).__getitem__, 0) # id. - assert not c.pass_array(cppyy.gbl.nullptr) - raises(Exception, c.pass_array(cppyy.gbl.nullptr).__getitem__, 0) # id. id. + assert not c.pass_array(_cppyy.gbl.nullptr) + raises(Exception, c.pass_array(_cppyy.gbl.nullptr).__getitem__, 0) # id. id. c.destruct() def test05_class_read_access(self): """Test read access to class public data and verify values""" - import cppyy, sys - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy, sys + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert isinstance(c, CppyyTestData) @@ -281,8 +281,8 @@ def test06_class_data_write_access(self): """Test write access to class public data and verify values""" - import cppyy, sys - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy, sys + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert isinstance(c, CppyyTestData) @@ -345,8 +345,8 @@ def test07_range_access(self): """Test the ranges of integer types""" - import cppyy, sys - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy, sys + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert isinstance(c, CppyyTestData) @@ -361,8 +361,8 @@ def test08_type_conversions(self): """Test conversions between builtin types""" - import cppyy, sys - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy, sys + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert isinstance(c, CppyyTestData) @@ -379,8 +379,8 @@ def test09_global_builtin_type(self): """Test access to a global builtin type""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl assert gbl.g_int == 
gbl.get_global_int() @@ -395,8 +395,8 @@ def test10_global_ptr(self): """Test access of global objects through a pointer""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl raises(ReferenceError, 'gbl.g_pod.m_int') @@ -426,10 +426,10 @@ def test11_enum(self): """Test access to enums""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl - CppyyTestData = cppyy.gbl.CppyyTestData + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert isinstance(c, CppyyTestData) @@ -474,8 +474,8 @@ def test12_string_passing(self): """Test passing/returning of a const char*""" - import cppyy - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert c.get_valid_string('aap') == 'aap' @@ -484,8 +484,8 @@ def test13_copy_contructor(self): """Test copy constructor""" - import cppyy - FourVector = cppyy.gbl.FourVector + import _cppyy + FourVector = _cppyy.gbl.FourVector t1 = FourVector(1., 2., 3., -4.) t2 = FourVector(0., 0., 0., 0.) @@ -500,9 +500,9 @@ def test14_object_returns(self): """Test access to and return of PODs""" - import cppyy + import _cppyy - c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() assert c.m_pod.m_int == 888 assert c.m_pod.m_double == 3.14 @@ -527,13 +527,13 @@ def test15_object_arguments(self): """Test setting and returning of a POD through arguments""" - import cppyy + import _cppyy - c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() assert c.m_pod.m_int == 888 assert c.m_pod.m_double == 3.14 - p = cppyy.gbl.CppyyTestPod() + p = _cppyy.gbl.CppyyTestPod() p.m_int = 123 assert p.m_int == 123 p.m_double = 321. @@ -543,12 +543,12 @@ assert c.m_pod.m_int == 123 assert c.m_pod.m_double == 321. - c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() c.set_pod_ptr_in(p) assert c.m_pod.m_int == 123 assert c.m_pod.m_double == 321. 
- c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() c.set_pod_ptr_out(p) assert p.m_int == 888 assert p.m_double == 3.14 @@ -556,26 +556,26 @@ p.m_int = 555 p.m_double = 666. - c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() c.set_pod_ref(p) assert c.m_pod.m_int == 555 assert c.m_pod.m_double == 666. - c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() c.set_pod_ptrptr_in(p) assert c.m_pod.m_int == 555 assert c.m_pod.m_double == 666. assert p.m_int == 555 assert p.m_double == 666. - c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() c.set_pod_void_ptrptr_in(p) assert c.m_pod.m_int == 555 assert c.m_pod.m_double == 666. assert p.m_int == 555 assert p.m_double == 666. - c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() c.set_pod_ptrptr_out(p) assert c.m_pod.m_int == 888 assert c.m_pod.m_double == 3.14 @@ -585,7 +585,7 @@ p.m_int = 777 p.m_double = 888. - c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() c.set_pod_void_ptrptr_out(p) assert c.m_pod.m_int == 888 assert c.m_pod.m_double == 3.14 @@ -595,10 +595,10 @@ def test16_nullptr_passing(self): """Integer 0 ('NULL') and None allowed to pass through instance*""" - import cppyy + import _cppyy for o in (0, None): - c = cppyy.gbl.CppyyTestData() + c = _cppyy.gbl.CppyyTestData() assert c.m_pod.m_int == 888 assert c.m_pod.m_double == 3.14 assert not not c.m_ppod @@ -610,8 +610,8 @@ def test17_respect_privacy(self): """Test that privacy settings are respected""" - import cppyy - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() assert isinstance(c, CppyyTestData) @@ -623,26 +623,26 @@ def test18_object_and_pointer_comparisons(self): """Verify object and pointer comparisons""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl - c1 = cppyy.bind_object(0, gbl.CppyyTestData) + c1 = _cppyy.bind_object(0, gbl.CppyyTestData) assert c1 == None assert None == c1 - c2 = 
cppyy.bind_object(0, gbl.CppyyTestData) + c2 = _cppyy.bind_object(0, gbl.CppyyTestData) assert c1 == c2 assert c2 == c1 # FourVector overrides operator== - l1 = cppyy.bind_object(0, gbl.FourVector) + l1 = _cppyy.bind_object(0, gbl.FourVector) assert l1 == None assert None == l1 assert c1 != l1 assert l1 != c1 - l2 = cppyy.bind_object(0, gbl.FourVector) + l2 = _cppyy.bind_object(0, gbl.FourVector) assert l1 == l2 assert l2 == l1 @@ -660,7 +660,7 @@ def test19_object_validity(self): """Test object validity checking""" - from cppyy import gbl + from _cppyy import gbl d = gbl.CppyyTestPod() @@ -674,8 +674,8 @@ def test20_buffer_reshaping(self): """Test usage of buffer sizing""" - import cppyy - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() for func in ['get_bool_array', 'get_bool_array2', @@ -695,34 +695,34 @@ def test21_voidp(self): """Test usage of void* data""" - import cppyy - CppyyTestData = cppyy.gbl.CppyyTestData + import _cppyy + CppyyTestData = _cppyy.gbl.CppyyTestData c = CppyyTestData() - assert not cppyy.gbl.nullptr + assert not _cppyy.gbl.nullptr - assert c.s_voidp is cppyy.gbl.nullptr - assert CppyyTestData.s_voidp is cppyy.gbl.nullptr + assert c.s_voidp is _cppyy.gbl.nullptr + assert CppyyTestData.s_voidp is _cppyy.gbl.nullptr - assert c.m_voidp is cppyy.gbl.nullptr - assert c.get_voidp() is cppyy.gbl.nullptr + assert c.m_voidp is _cppyy.gbl.nullptr + assert c.get_voidp() is _cppyy.gbl.nullptr c2 = CppyyTestData() - assert c2.m_voidp is cppyy.gbl.nullptr + assert c2.m_voidp is _cppyy.gbl.nullptr c.set_voidp(c2.m_voidp) - assert c.m_voidp is cppyy.gbl.nullptr + assert c.m_voidp is _cppyy.gbl.nullptr c.set_voidp(c2.get_voidp()) - assert c.m_voidp is cppyy.gbl.nullptr - c.set_voidp(cppyy.gbl.nullptr) - assert c.m_voidp is cppyy.gbl.nullptr + assert c.m_voidp is _cppyy.gbl.nullptr + c.set_voidp(_cppyy.gbl.nullptr) + assert c.m_voidp is _cppyy.gbl.nullptr c.set_voidp(c2) def 
address_equality_test(a, b): - assert cppyy.addressof(a) == cppyy.addressof(b) - b2 = cppyy.bind_object(a, CppyyTestData) + assert _cppyy.addressof(a) == _cppyy.addressof(b) + b2 = _cppyy.bind_object(a, CppyyTestData) assert b is b2 # memory regulator recycles - b3 = cppyy.bind_object(cppyy.addressof(a), CppyyTestData) + b3 = _cppyy.bind_object(_cppyy.addressof(a), CppyyTestData) assert b is b3 # likewise address_equality_test(c.m_voidp, c2) @@ -730,8 +730,8 @@ def null_test(null): c.m_voidp = null - assert c.m_voidp is cppyy.gbl.nullptr - map(null_test, [0, None, cppyy.gbl.nullptr]) + assert c.m_voidp is _cppyy.gbl.nullptr + map(null_test, [0, None, _cppyy.gbl.nullptr]) c.m_voidp = c2 address_equality_test(c.m_voidp, c2) diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/_cppyy/test/test_fragile.py rename from pypy/module/cppyy/test/test_fragile.py rename to pypy/module/_cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/_cppyy/test/test_fragile.py @@ -1,6 +1,6 @@ import py, os, sys -from pypy.module.cppyy import capi +from pypy.module._cppyy import capi currpath = py.path.local(__file__).dirpath() @@ -14,35 +14,35 @@ raise OSError("'make' failed (see stderr)") From pypy.commits at gmail.com Wed Jul 19 16:46:39 2017 From: pypy.commits at gmail.com (wlav) Date: Wed, 19 Jul 2017 13:46:39 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: remove all cint backend references Message-ID: <596fc52f.c8a0df0a.535c8.26f9@mx.google.com> Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r91927:7480a9a40022 Date: 2017-07-18 16:36 -0700 http://bitbucket.org/pypy/pypy/changeset/7480a9a40022/ Log: remove all cint backend references diff --git a/pypy/module/_cppyy/test/test_advancedcpp.py b/pypy/module/_cppyy/test/test_advancedcpp.py --- a/pypy/module/_cppyy/test/test_advancedcpp.py +++ b/pypy/module/_cppyy/test/test_advancedcpp.py @@ -1,11 +1,9 @@ import py, os, sys -from pypy.module._cppyy import capi - - 
currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("advancedcppDict.so")) + def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") @@ -19,7 +17,6 @@ def setup_class(cls): cls.w_test_dct = cls.space.newtext(test_dct) - cls.w_capi_identity = cls.space.newtext(capi.identify()) cls.w_advanced = cls.space.appexec([], """(): import _cppyy return _cppyy.load_reflection_info(%r)""" % (test_dct, )) @@ -538,9 +535,6 @@ assert c1.m_c == 3 c1.destruct() - if self.capi_identity == 'CINT': # CINT does not support dynamic casts - return - c2 = _cppyy.gbl.create_c2() assert type(c2) == _cppyy.gbl.c_class_2 assert c2.m_c == 3 @@ -549,15 +543,6 @@ def test14_new_overloader(self): """Verify that class-level overloaded new/delete are called""" - # TODO: operator new appears to be respected by CINT, but operator - # delete is not called through root/meta. Anyway, Reflex gets it all - # wrong (clear from the generated code). Keep this test as it should - # be all better in the cling/llvm world ... - - # TODO: get the capi-identify test selection right ... 
- if self.capi_identity != 'CINT': # don't test anything for Reflex - return - import _cppyy assert _cppyy.gbl.new_overloader.s_instances == 0 @@ -565,9 +550,6 @@ assert _cppyy.gbl.new_overloader.s_instances == 1 nl.destruct() - if self.capi_identity == 'CINT': # do not test delete - return - import gc gc.collect() assert _cppyy.gbl.new_overloader.s_instances == 0 diff --git a/pypy/module/_cppyy/test/test_fragile.py b/pypy/module/_cppyy/test/test_fragile.py --- a/pypy/module/_cppyy/test/test_fragile.py +++ b/pypy/module/_cppyy/test/test_fragile.py @@ -1,11 +1,9 @@ import py, os, sys -from pypy.module._cppyy import capi - - currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("fragileDict.so")) + def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") @@ -18,7 +16,6 @@ def setup_class(cls): cls.w_test_dct = cls.space.newtext(test_dct) - cls.w_identity = cls.space.newtext(capi.identify()) cls.w_fragile = cls.space.appexec([], """(): import _cppyy return _cppyy.load_reflection_info(%r)""" % (test_dct, )) @@ -213,20 +210,13 @@ import _cppyy - if self.identity == 'CINT': # CINT only support classes on global space - members = dir(_cppyy.gbl) - assert 'TROOT' in members - assert 'TSystem' in members - assert 'TClass' in members - members = dir(_cppyy.gbl.fragile) - else: - members = dir(_cppyy.gbl.fragile) - assert 'A' in members - assert 'B' in members - assert 'C' in members - assert 'D' in members # classes + members = dir(_cppyy.gbl.fragile) + assert 'A' in members + assert 'B' in members + assert 'C' in members + assert 'D' in members # classes - assert 'nested1' in members # namespace + assert 'nested1' in members # namespace # TODO: think this through ... 
probably want this, but interferes with # the (new) policy of lazy lookups diff --git a/pypy/module/_cppyy/test/test_zjit.py b/pypy/module/_cppyy/test/test_zjit.py --- a/pypy/module/_cppyy/test/test_zjit.py +++ b/pypy/module/_cppyy/test/test_zjit.py @@ -8,9 +8,7 @@ from pypy.module._cppyy import interp_cppyy, capi, executor # These tests are for the backend that support the fast path only. -if capi.identify() == 'CINT': - py.test.skip("CINT does not support fast path") -elif capi.identify() == 'loadable_capi': +if capi.identify() == 'loadable_capi': py.test.skip("can not currently use FakeSpace with _cffi_backend") elif os.getenv("CPPYY_DISABLE_FASTPATH"): py.test.skip("fast path is disabled by CPPYY_DISABLE_FASTPATH envar") From pypy.commits at gmail.com Wed Jul 19 16:46:46 2017 From: pypy.commits at gmail.com (wlav) Date: Wed, 19 Jul 2017 13:46:46 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: remove cppyy.rst and point to the new external documentation instead Message-ID: <596fc536.cd3f1c0a.16e95.4f43@mx.google.com> Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r91931:a14ed2de9bb6 Date: 2017-07-19 13:26 -0700 http://bitbucket.org/pypy/pypy/changeset/a14ed2de9bb6/ Log: remove cppyy.rst and point to the new external documentation instead diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst deleted file mode 100644 --- a/pypy/doc/cppyy.rst +++ /dev/null @@ -1,672 +0,0 @@ -cppyy: C++ bindings for PyPy -============================ - -The cppyy module delivers dynamic Python-C++ bindings. -It is designed for automation, high performance, scale, interactivity, and -handling all of modern C++ (11, 14, etc.). -It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ -reflection and interactivity. -Reflection information is extracted from C++ header files. -Cppyy itself is built into PyPy (an alternative exists for CPython), but -it requires a `backend`_, installable through pip, to interface with Cling. - -.. 
_Cling: https://root.cern.ch/cling -.. _LLVM: http://llvm.org/ -.. _clang: http://clang.llvm.org/ -.. _backend: https://pypi.python.org/pypi/PyPy-cppyy-backend - - -Installation ------------- - -This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy -module, which is no longer supported. -Both the tooling and user-facing Python codes are very backwards compatible, -however. -Further dependencies are cmake (for general build), Python2.7 (for LLVM), and -a modern C++ compiler (one that supports at least C++11). - -Assuming you have a recent enough version of PyPy installed, use pip to -complete the installation of cppyy:: - - $ MAKE_NPROCS=4 pypy-c -m pip install --verbose PyPy-cppyy-backend - -Set the number of parallel builds ('4' in this example, through the MAKE_NPROCS -environment variable) to a number appropriate for your machine. -The building process may take quite some time as it includes a customized -version of LLVM as part of Cling, which is why --verbose is recommended so that -you can see the build progress. - -The default installation will be under -$PYTHONHOME/site-packages/cppyy_backend/lib, -which needs to be added to your dynamic loader path (LD_LIBRARY_PATH). -If you need the dictionary and class map generation tools (used in the examples -below), you need to add $PYTHONHOME/site-packages/cppyy_backend/bin to your -executable path (PATH). - - -Basic bindings example ----------------------- - -These examples assume that cppyy_backend is pointed to by the environment -variable CPPYYHOME, and that CPPYYHOME/lib is added to LD_LIBRARY_PATH and -CPPYYHOME/bin to PATH. - -Let's first test with a trivial example whether all packages are properly -installed and functional. 
-Create a C++ header file with some class in it (all functions are made inline -for convenience; if you have out-of-line code, link with it as appropriate):: - - $ cat MyClass.h - class MyClass { - public: - MyClass(int i = -99) : m_myint(i) {} - - int GetMyInt() { return m_myint; } - void SetMyInt(int i) { m_myint = i; } - - public: - int m_myint; - }; - -Then, generate the bindings using ``genreflex`` (installed under -cppyy_backend/bin in site_packages), and compile the code:: - - $ genreflex MyClass.h - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling - -Next, make sure that the library can be found through the dynamic lookup path -(the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), -for example by adding ".". -Now you're ready to use the bindings. -Since the bindings are designed to look pythonistic, it should be -straightforward:: - - $ pypy-c - >>>> import cppyy - >>>> cppyy.load_reflection_info("libMyClassDict.so") - - >>>> myinst = cppyy.gbl.MyClass(42) - >>>> print myinst.GetMyInt() - 42 - >>>> myinst.SetMyInt(33) - >>>> print myinst.m_myint - 33 - >>>> myinst.m_myint = 77 - >>>> print myinst.GetMyInt() - 77 - >>>> help(cppyy.gbl.MyClass) # shows that normal python introspection works - -That's all there is to it! - - -Automatic class loader ----------------------- - -There is one big problem in the code above, that prevents its use in a (large -scale) production setting: the explicit loading of the reflection library. -Clearly, if explicit load statements such as these show up in code downstream -from the ``MyClass`` package, then that prevents the ``MyClass`` author from -repackaging or even simply renaming the dictionary library. - -The solution is to make use of an automatic class loader, so that downstream -code never has to call ``load_reflection_info()`` directly. 
-The class loader makes use of so-called rootmap files, which ``genreflex`` -can produce. -These files contain the list of available C++ classes and specify the library -that needs to be loaded for their use (as an aside, this listing allows for a -cross-check to see whether reflection info is generated for all classes that -you expect). -By convention, the rootmap files should be located next to the reflection info -libraries, so that they can be found through the normal shared library search -path. -They can be concatenated together, or consist of a single rootmap file per -library. -For example:: - - $ genreflex MyClass.h --rootmap=libMyClassDict.rootmap --rootmap-lib=libMyClassDict.so - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling - -where the first option (``--rootmap``) specifies the output file name, and the -second option (``--rootmap-lib``) the name of the reflection library where -``MyClass`` will live. -It is necessary to provide that name explicitly, since it is only in the -separate linking step where this name is fixed. -If the second option is not given, the library is assumed to be libMyClass.so, -a name that is derived from the name of the header file. - -With the rootmap file in place, the above example can be rerun without explicit -loading of the reflection info library:: - - $ pypy-c - >>>> import cppyy - >>>> myinst = cppyy.gbl.MyClass(42) - >>>> print myinst.GetMyInt() - 42 - >>>> # etc. ... - -As a caveat, note that the class loader is currently limited to classes only. 
- - -Advanced example ----------------- - -The following snippet of C++ is very contrived, to allow showing that such -pathological code can be handled and to show how certain features play out in -practice:: - - $ cat MyAdvanced.h - #include - - class Base1 { - public: - Base1(int i) : m_i(i) {} - virtual ~Base1() {} - int m_i; - }; - - class Base2 { - public: - Base2(double d) : m_d(d) {} - virtual ~Base2() {} - double m_d; - }; - - class C; - - class Derived : public virtual Base1, public virtual Base2 { - public: - Derived(const std::string& name, int i, double d) : Base1(i), Base2(d), m_name(name) {} - virtual C* gimeC() { return (C*)0; } - std::string m_name; - }; - - Base2* BaseFactory(const std::string& name, int i, double d) { - return new Derived(name, i, d); - } - -This code is still only in a header file, with all functions inline, for -convenience of the example. -If the implementations live in a separate source file or shared library, the -only change needed is to link those in when building the reflection library. - -If you were to run ``genreflex`` like above in the basic example, you will -find that not all classes of interest will be reflected, nor will be the -global factory function. -In particular, ``std::string`` will be missing, since it is not defined in -this header file, but in a header file that is included. -In practical terms, general classes such as ``std::string`` should live in a -core reflection set, but for the moment assume we want to have it in the -reflection library that we are building for this example. - -The ``genreflex`` script can be steered using a so-called `selection file`_ -(see "Generating Reflex Dictionaries") -which is a simple XML file specifying, either explicitly or by using a -pattern, which classes, variables, namespaces, etc. to select from the given -header file. 
-With the aid of a selection file, a large project can be easily managed: -simply ``#include`` all relevant headers into a single header file that is -handed to ``genreflex``. -In fact, if you hand multiple header files to ``genreflex``, then a selection -file is almost obligatory: without it, only classes from the last header will -be selected. -Then, apply a selection file to pick up all the relevant classes. -For our purposes, the following rather straightforward selection will do -(the name ``lcgdict`` for the root is historical, but required):: - - $ cat MyAdvanced.xml - - - - - - - -.. _selection file: https://root.cern.ch/how/how-use-reflex - -Now the reflection info can be generated and compiled:: - - $ genreflex MyAdvanced.h --selection=MyAdvanced.xml - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$CPPYYHOME/lib -lCling - -and subsequently be used from PyPy:: - - >>>> import cppyy - >>>> cppyy.load_reflection_info("libAdvExDict.so") - - >>>> d = cppyy.gbl.BaseFactory("name", 42, 3.14) - >>>> type(d) - - >>>> isinstance(d, cppyy.gbl.Base1) - True - >>>> isinstance(d, cppyy.gbl.Base2) - True - >>>> d.m_i, d.m_d - (42, 3.14) - >>>> d.m_name == "name" - True - >>>> - -Again, that's all there is to it! - -A couple of things to note, though. -If you look back at the C++ definition of the ``BaseFactory`` function, -you will see that it declares the return type to be a ``Base2``, yet the -bindings return an object of the actual type ``Derived``? -This choice is made for a couple of reasons. -First, it makes method dispatching easier: if bound objects are always their -most derived type, then it is easy to calculate any offsets, if necessary. -Second, it makes memory management easier: the combination of the type and -the memory address uniquely identifies an object. 
-That way, it can be recycled and object identity can be maintained if it is -entered as a function argument into C++ and comes back to PyPy as a return -value. -Last, but not least, casting is decidedly unpythonistic. -By always providing the most derived type known, casting becomes unnecessary. -For example, the data member of ``Base2`` is simply directly available. -Note also that the unreflected ``gimeC`` method of ``Derived`` does not -preclude its use. -It is only the ``gimeC`` method that is unusable as long as class ``C`` is -unknown to the system. - - -Features --------- - -The following is not meant to be an exhaustive list, since cppyy is still -under active development. -Furthermore, the intention is that every feature is as natural as possible on -the python side, so if you find something missing in the list below, simply -try it out. -It is not always possible to provide exact mapping between python and C++ -(active memory management is one such case), but by and large, if the use of a -feature does not strike you as obvious, it is more likely to simply be a bug. -That is a strong statement to make, but also a worthy goal. -For the C++ side of the examples, refer to this :doc:`example code `, which was -bound using:: - - $ genreflex example.h --deep --rootmap=libexampleDict.rootmap --rootmap-lib=libexampleDict.so - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include example_rflx.cpp -o libexampleDict.so -L$CPPYYHOME/lib -lCling - -* **abstract classes**: Are represented as python classes, since they are - needed to complete the inheritance hierarchies, but will raise an exception - if an attempt is made to instantiate from them. 
- Example:: - - >>>> from cppyy.gbl import AbstractClass, ConcreteClass - >>>> a = AbstractClass() - Traceback (most recent call last): - File "", line 1, in - TypeError: cannot instantiate abstract class 'AbstractClass' - >>>> issubclass(ConcreteClass, AbstractClass) - True - >>>> c = ConcreteClass() - >>>> isinstance(c, AbstractClass) - True - >>>> - -* **arrays**: Supported for builtin data types only, as used from module - ``array``. - Out-of-bounds checking is limited to those cases where the size is known at - compile time (and hence part of the reflection info). - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> from array import array - >>>> c = ConcreteClass() - >>>> c.array_method(array('d', [1., 2., 3., 4.]), 4) - 1 2 3 4 - >>>> - -* **builtin data types**: Map onto the expected equivalent python types, with - the caveat that there may be size differences, and thus it is possible that - exceptions are raised if an overflow is detected. - -* **casting**: Is supposed to be unnecessary. - Object pointer returns from functions provide the most derived class known - in the hierarchy of the object being returned. - This is important to preserve object identity as well as to make casting, - a pure C++ feature after all, superfluous. - Example:: - - >>>> from cppyy.gbl import AbstractClass, ConcreteClass - >>>> c = ConcreteClass() - >>>> ConcreteClass.show_autocast.__doc__ - 'AbstractClass* ConcreteClass::show_autocast()' - >>>> d = c.show_autocast() - >>>> type(d) - - >>>> - - However, if need be, you can perform C++-style reinterpret_casts (i.e. - without taking offsets into account), by taking and rebinding the address - of an object:: - - >>>> from cppyy import addressof, bind_object - >>>> e = bind_object(addressof(d), AbstractClass) - >>>> type(e) - - >>>> - -* **classes and structs**: Get mapped onto python classes, where they can be - instantiated as expected. 
- If classes are inner classes or live in a namespace, their naming and - location will reflect that. - Example:: - - >>>> from cppyy.gbl import ConcreteClass, Namespace - >>>> ConcreteClass == Namespace.ConcreteClass - False - >>>> n = Namespace.ConcreteClass.NestedClass() - >>>> type(n) - - >>>> - -* **data members**: Public data members are represented as python properties - and provide read and write access on instances as expected. - Private and protected data members are not accessible. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> c = ConcreteClass() - >>>> c.m_int - 42 - >>>> - -* **default arguments**: C++ default arguments work as expected, but python - keywords are not supported. - It is technically possible to support keywords, but for the C++ interface, - the formal argument names have no meaning and are not considered part of the - API, hence it is not a good idea to use keywords. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> c = ConcreteClass() # uses default argument - >>>> c.m_int - 42 - >>>> c = ConcreteClass(13) - >>>> c.m_int - 13 - >>>> - -* **doc strings**: The doc string of a method or function contains the C++ - arguments and return types of all overloads of that name, as applicable. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> print ConcreteClass.array_method.__doc__ - void ConcreteClass::array_method(int*, int) - void ConcreteClass::array_method(double*, int) - >>>> - -* **enums**: Are translated as ints with no further checking. - -* **functions**: Work as expected and live in their appropriate namespace - (which can be the global one, ``cppyy.gbl``). - -* **inheritance**: All combinations of inheritance on the C++ (single, - multiple, virtual) are supported in the binding. - However, new python classes can only use single inheritance from a bound C++ - class. - Multiple inheritance would introduce two "this" pointers in the binding. - This is a current, not a fundamental, limitation. 
- The C++ side will not see any overridden methods on the python side, as - cross-inheritance is planned but not yet supported. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> help(ConcreteClass) - Help on class ConcreteClass in module __main__: - - class ConcreteClass(AbstractClass) - | Method resolution order: - | ConcreteClass - | AbstractClass - | cppyy.CPPObject - | __builtin__.CPPInstance - | __builtin__.object - | - | Methods defined here: - | - | ConcreteClass(self, *args) - | ConcreteClass::ConcreteClass(const ConcreteClass&) - | ConcreteClass::ConcreteClass(int) - | ConcreteClass::ConcreteClass() - | - etc. .... - -* **memory**: C++ instances created by calling their constructor from python - are owned by python. - You can check/change the ownership with the _python_owns flag that every - bound instance carries. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> c = ConcreteClass() - >>>> c._python_owns # True: object created in Python - True - >>>> - -* **methods**: Are represented as python methods and work as expected. - They are first class objects and can be bound to an instance. - Virtual C++ methods work as expected. - To select a specific virtual method, do like with normal python classes - that override methods: select it from the class that you need, rather than - calling the method on the instance. - To select a specific overload, use the __dispatch__ special function, which - takes the name of the desired method and its signature (which can be - obtained from the doc string) as arguments. - -* **namespaces**: Are represented as python classes. - Namespaces are more open-ended than classes, so sometimes initial access may - result in updates as data and functions are looked up and constructed - lazily. - Thus the result of ``dir()`` on a namespace shows the classes available, - even if they may not have been created yet. - It does not show classes that could potentially be loaded by the class - loader. 
- Once created, namespaces are registered as modules, to allow importing from - them. - Namespace currently do not work with the class loader. - Fixing these bootstrap problems is on the TODO list. - The global namespace is ``cppyy.gbl``. - -* **NULL**: Is represented as ``cppyy.gbl.nullptr``. - In C++11, the keyword ``nullptr`` is used to represent ``NULL``. - For clarity of intent, it is recommended to use this instead of ``None`` - (or the integer ``0``, which can serve in some cases), as ``None`` is better - understood as ``void`` in C++. - -* **operator conversions**: If defined in the C++ class and a python - equivalent exists (i.e. all builtin integer and floating point types, as well - as ``bool``), it will map onto that python conversion. - Note that ``char*`` is mapped onto ``__str__``. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> print ConcreteClass() - Hello operator const char*! - >>>> - -* **operator overloads**: If defined in the C++ class and if a python - equivalent is available (not always the case, think e.g. of ``operator||``), - then they work as expected. - Special care needs to be taken for global operator overloads in C++: first, - make sure that they are actually reflected, especially for the global - overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterate over a vector). - Second, make sure that reflection info is loaded in the proper order. - I.e. that these global overloads are available before use. - -* **pointers**: For builtin data types, see arrays. - For objects, a pointer to an object and an object looks the same, unless - the pointer is a data member. - In that case, assigning to the data member will cause a copy of the pointer - and care should be taken about the object's life time. - If a pointer is a global variable, the C++ side can replace the underlying - object and the python side will immediately reflect that. 
- -* **PyObject***: Arguments and return types of ``PyObject*`` can be used, and - passed on to CPython API calls. - Since these CPython-like objects need to be created and tracked (this all - happens through ``cpyext``) this interface is not particularly fast. - -* **static data members**: Are represented as python property objects on the - class and the meta-class. - Both read and write access is as expected. - -* **static methods**: Are represented as python's ``staticmethod`` objects - and can be called both from the class as well as from instances. - -* **strings**: The std::string class is considered a builtin C++ type and - mixes quite well with python's str. - Python's str can be passed where a ``const char*`` is expected, and an str - will be returned if the return type is ``const char*``. - -* **templated classes**: Are represented in a meta-class style in python. - This may look a little bit confusing, but conceptually is rather natural. - For example, given the class ``std::vector``, the meta-class part would - be ``std.vector``. - Then, to get the instantiation on ``int``, do ``std.vector(int)`` and to - create an instance of that class, do ``std.vector(int)()``:: - - >>>> import cppyy - >>>> cppyy.load_reflection_info('libexampleDict.so') - >>>> cppyy.gbl.std.vector # template metatype - - >>>> cppyy.gbl.std.vector(int) # instantiates template -> class - '> - >>>> cppyy.gbl.std.vector(int)() # instantiates class -> object - <__main__.std::vector object at 0x00007fe480ba4bc0> - >>>> - - Note that templates can be build up by handing actual types to the class - instantiation (as done in this vector example), or by passing in the list of - template arguments as a string. - The former is a lot easier to work with if you have template instantiations - using classes that themselves are templates in the arguments (think e.g a - vector of vectors). 
- All template classes must already exist in the loaded reflection info, they - do not work (yet) with the class loader. - - For compatibility with other bindings generators, use of square brackets - instead of parenthesis to instantiate templates is supported as well. - -* **templated functions**: Automatically participate in overloading and are - used in the same way as other global functions. - -* **templated methods**: For now, require an explicit selection of the - template parameters. - This will be changed to allow them to participate in overloads as expected. - -* **typedefs**: Are simple python references to the actual classes to which - they refer. - -* **unary operators**: Are supported if a python equivalent exists, and if the - operator is defined in the C++ class. - -You can always find more detailed examples and see the full of supported -features by looking at the tests in pypy/module/cppyy/test. - -If a feature or reflection info is missing, this is supposed to be handled -gracefully. -In fact, there are unit tests explicitly for this purpose (even as their use -becomes less interesting over time, as the number of missing features -decreases). -Only when a missing feature is used, should there be an exception. -For example, if no reflection info is available for a return type, then a -class that has a method with that return type can still be used. -Only that one specific method can not be used. - - -Templates ---------- - -Templates can be automatically instantiated, assuming the appropriate header -files have been loaded or are accessible to the class loader. -This is the case for example for all of STL. 
-For example:: - - $ cat MyTemplate.h - #include - - class MyClass { - public: - MyClass(int i = -99) : m_i(i) {} - MyClass(const MyClass& s) : m_i(s.m_i) {} - MyClass& operator=(const MyClass& s) { m_i = s.m_i; return *this; } - ~MyClass() {} - int m_i; - }; - -Run the normal ``genreflex`` and compilation steps:: - - $ genreflex MyTemplate.h --selection=MyTemplate.xml - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$CPPYYHOME/lib -lCling - -Subsequent use should be as expected. -Note the meta-class style of "instantiating" the template:: - - >>>> import cppyy - >>>> cppyy.load_reflection_info("libTemplateDict.so") - >>>> std = cppyy.gbl.std - >>>> MyClass = cppyy.gbl.MyClass - >>>> v = std.vector(MyClass)() - >>>> v += [MyClass(1), MyClass(2), MyClass(3)] - >>>> for m in v: - .... print m.m_i, - .... - 1 2 3 - >>>> - -The arguments to the template instantiation can either be a string with the -full list of arguments, or the explicit classes. -The latter makes for easier code writing if the classes passed to the -instantiation are themselves templates. - - -The fast lane -------------- - -By default, cppyy will use direct function pointers through `CFFI`_ whenever -possible. If this causes problems for you, you can disable it by setting the -CPPYY_DISABLE_FASTPATH environment variable. - -.. _CFFI: https://cffi.readthedocs.io/en/latest/ - - -CPython -------- - -Most of the ideas in cppyy come originally from the `PyROOT`_ project, which -contains a CPython-based cppyy.py module (with similar dependencies as the -one that comes with PyPy). -A standalone pip-installable version is planned, but for now you can install -ROOT through your favorite distribution installer (available in the science -section). - -.. _PyROOT: https://root.cern.ch/pyroot - -There are a couple of minor differences between the two versions of cppyy -(the CPython version has a few more features). 
-Work is on-going to integrate the nightly tests of both to make sure their -feature sets are equalized. - - -Python3 -------- - -The CPython version of cppyy supports Python3, assuming your packager has -build the backend for it. -The cppyy module has not been tested with the `Py3k`_ version of PyPy. -Note that the generated reflection information (from ``genreflex``) is fully -independent of Python, and does not need to be rebuild when switching versions -or interpreters. - -.. _Py3k: https://bitbucket.org/pypy/pypy/src/py3k - - -.. toctree:: - :hidden: - - cppyy_example diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -61,29 +61,23 @@ .. _libffi: http://sourceware.org/libffi/ -Cling and cppyy ---------------- +cppyy +----- -The builtin :doc:`cppyy ` module uses reflection information, provided by -`Cling`_ (which needs to be `installed separately`_), of C/C++ code to -automatically generate bindings at runtime. -In Python, classes and functions are always runtime structures, so when they -are generated matters not for performance. -However, if the backend itself is capable of dynamic behavior, it is a much -better functional match, allowing tighter integration and more natural -language mappings. +For C++, `cppyy`_ is an automated bindings generator available for both +PyPy and CPython. +``cppyy`` relies on declarations from C++ header files to dynamically +construct Python equivalent classes, functions, variables, etc. +It is designed for use by large scale programs and supports modern C++. +With PyPy, it leverages the built-in ``_cppyy`` module, allowing the JIT to +remove most of the cross-language overhead. -The :doc:`cppyy ` module is written in RPython, thus PyPy's JIT is able to remove -most cross-language call overhead. +To install, run ``pip install cppyy``. +Further details are available in the `full documentation`_. -:doc:Full details are `available here `. +.. 
_cppyy: http://cppyy.readthedocs.org/ +.. _`full documentation`: http://cppyy.readthedocs.org/ -.. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend -.. _Cling: https://root.cern.ch/cling - -.. toctree:: - - cppyy RPython Mixed Modules --------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -25,3 +25,8 @@ .. branch: cpyext-hash_notimpl If ``tp_hash`` is ``PyObject_HashNotImplemented``, set ``obj.__dict__['__hash__']`` to None + +.. branch: cppyy-packaging + +Renaming of ``cppyy`` to ``_cppyy``. +The former is now an external package installable with ``pip install cppyy``. From pypy.commits at gmail.com Wed Jul 19 16:46:44 2017 From: pypy.commits at gmail.com (wlav) Date: Wed, 19 Jul 2017 13:46:44 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: renaming cppyy -> _cppyy in files to ignore Message-ID: <596fc534.d49c1c0a.362db.4e7c@mx.google.com> Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r91930:e1cc8eafb082 Date: 2017-07-19 12:30 -0700 http://bitbucket.org/pypy/pypy/changeset/e1cc8eafb082/ Log: renaming cppyy -> _cppyy in files to ignore diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -25,16 +25,17 @@ ^pypy/module/cpyext/test/.+\.manifest$ ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ ^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ -^pypy/module/cppyy/src/.+\.o$ -^pypy/module/cppyy/bench/.+\.so$ -^pypy/module/cppyy/bench/.+\.root$ -^pypy/module/cppyy/bench/.+\.d$ -^pypy/module/cppyy/src/.+\.errors$ -^pypy/module/cppyy/test/.+_rflx\.cpp$ -^pypy/module/cppyy/test/.+\.so$ -^pypy/module/cppyy/test/.+\.rootmap$ -^pypy/module/cppyy/test/.+\.exe$ -^pypy/module/cppyy/test/.+_cint.h$ +^pypy/module/_cppyy/src/.+\.o$ +^pypy/module/_cppyy/bench/.+\.so$ +^pypy/module/_cppyy/bench/.+\.root$ +^pypy/module/_cppyy/bench/.+\.d$ +^pypy/module/_cppyy/src/.+\.errors$ +^pypy/module/_cppyy/test/.+_rflx\.cpp$ 
+^pypy/module/_cppyy/test/.+\.so$ +^pypy/module/_cppyy/test/.+\.rootmap$ +^pypy/module/_cppyy/test/.+\.exe$ +^pypy/module/_cppyy/test/.+_cint.h$ +^pypy/module/_cppyy/.+/*\.pcm$ ^pypy/module/test_lib_pypy/cffi_tests/__pycache__.+$ ^pypy/doc/.+\.html$ ^pypy/doc/config/.+\.rst$ @@ -88,6 +89,3 @@ ^release/ ^rpython/_cache$ -pypy/module/cppyy/.+/*\.pcm - - From pypy.commits at gmail.com Wed Jul 19 16:46:48 2017 From: pypy.commits at gmail.com (wlav) Date: Wed, 19 Jul 2017 13:46:48 -0700 (PDT) Subject: [pypy-commit] pypy default: merged cppyy-packaging; renaming cppyy -> _cppyy Message-ID: <596fc538.c49edf0a.fe5e1.8ead@mx.google.com> Author: Wim Lavrijsen Branch: Changeset: r91932:992156d58504 Date: 2017-07-19 13:29 -0700 http://bitbucket.org/pypy/pypy/changeset/992156d58504/ Log: merged cppyy-packaging; renaming cppyy -> _cppyy diff too long, truncating to 2000 out of 4103 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -25,16 +25,17 @@ ^pypy/module/cpyext/test/.+\.manifest$ ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ ^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ -^pypy/module/cppyy/src/.+\.o$ -^pypy/module/cppyy/bench/.+\.so$ -^pypy/module/cppyy/bench/.+\.root$ -^pypy/module/cppyy/bench/.+\.d$ -^pypy/module/cppyy/src/.+\.errors$ -^pypy/module/cppyy/test/.+_rflx\.cpp$ -^pypy/module/cppyy/test/.+\.so$ -^pypy/module/cppyy/test/.+\.rootmap$ -^pypy/module/cppyy/test/.+\.exe$ -^pypy/module/cppyy/test/.+_cint.h$ +^pypy/module/_cppyy/src/.+\.o$ +^pypy/module/_cppyy/bench/.+\.so$ +^pypy/module/_cppyy/bench/.+\.root$ +^pypy/module/_cppyy/bench/.+\.d$ +^pypy/module/_cppyy/src/.+\.errors$ +^pypy/module/_cppyy/test/.+_rflx\.cpp$ +^pypy/module/_cppyy/test/.+\.so$ +^pypy/module/_cppyy/test/.+\.rootmap$ +^pypy/module/_cppyy/test/.+\.exe$ +^pypy/module/_cppyy/test/.+_cint.h$ +^pypy/module/_cppyy/.+/*\.pcm$ ^pypy/module/test_lib_pypy/cffi_tests/__pycache__.+$ ^pypy/doc/.+\.html$ ^pypy/doc/config/.+\.rst$ @@ -88,6 +89,3 @@ ^release/ 
^rpython/_cache$ -pypy/module/cppyy/.+/*\.pcm - - diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -36,7 +36,7 @@ "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", - "_csv", "cppyy", "_pypyjson", "_jitlog" + "_csv", "_cppyy", "_pypyjson", "_jitlog" ]) from rpython.jit.backend import detect_cpu @@ -67,8 +67,8 @@ if name in translation_modules: translation_modules.remove(name) - if "cppyy" in working_modules: - working_modules.remove("cppyy") # not tested on win32 + if "_cppyy" in working_modules: + working_modules.remove("_cppyy") # not tested on win32 if "faulthandler" in working_modules: working_modules.remove("faulthandler") # missing details @@ -79,8 +79,8 @@ working_modules.remove('fcntl') # LOCK_NB not defined working_modules.remove("_minimal_curses") working_modules.remove("termios") - if "cppyy" in working_modules: - working_modules.remove("cppyy") # depends on ctypes + if "_cppyy" in working_modules: + working_modules.remove("_cppyy") # depends on ctypes #if sys.platform.startswith("linux"): # _mach = os.popen('uname -m', 'r').read().strip() @@ -92,7 +92,7 @@ '_multiprocessing': [('objspace.usemodules.time', True), ('objspace.usemodules.thread', True)], 'cpyext': [('objspace.usemodules.array', True)], - 'cppyy': [('objspace.usemodules.cpyext', True)], + '_cppyy': [('objspace.usemodules.cpyext', True)], 'faulthandler': [('objspace.usemodules._vmprof', True)], } module_suggests = { diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst deleted file mode 100644 --- a/pypy/doc/cppyy.rst +++ /dev/null @@ -1,672 +0,0 @@ -cppyy: C++ bindings for PyPy -============================ - -The cppyy module delivers dynamic Python-C++ bindings. 
-It is designed for automation, high performance, scale, interactivity, and -handling all of modern C++ (11, 14, etc.). -It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ -reflection and interactivity. -Reflection information is extracted from C++ header files. -Cppyy itself is built into PyPy (an alternative exists for CPython), but -it requires a `backend`_, installable through pip, to interface with Cling. - -.. _Cling: https://root.cern.ch/cling -.. _LLVM: http://llvm.org/ -.. _clang: http://clang.llvm.org/ -.. _backend: https://pypi.python.org/pypi/PyPy-cppyy-backend - - -Installation ------------- - -This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy -module, which is no longer supported. -Both the tooling and user-facing Python codes are very backwards compatible, -however. -Further dependencies are cmake (for general build), Python2.7 (for LLVM), and -a modern C++ compiler (one that supports at least C++11). - -Assuming you have a recent enough version of PyPy installed, use pip to -complete the installation of cppyy:: - - $ MAKE_NPROCS=4 pypy-c -m pip install --verbose PyPy-cppyy-backend - -Set the number of parallel builds ('4' in this example, through the MAKE_NPROCS -environment variable) to a number appropriate for your machine. -The building process may take quite some time as it includes a customized -version of LLVM as part of Cling, which is why --verbose is recommended so that -you can see the build progress. - -The default installation will be under -$PYTHONHOME/site-packages/cppyy_backend/lib, -which needs to be added to your dynamic loader path (LD_LIBRARY_PATH). -If you need the dictionary and class map generation tools (used in the examples -below), you need to add $PYTHONHOME/site-packages/cppyy_backend/bin to your -executable path (PATH). 
- - -Basic bindings example ----------------------- - -These examples assume that cppyy_backend is pointed to by the environment -variable CPPYYHOME, and that CPPYYHOME/lib is added to LD_LIBRARY_PATH and -CPPYYHOME/bin to PATH. - -Let's first test with a trivial example whether all packages are properly -installed and functional. -Create a C++ header file with some class in it (all functions are made inline -for convenience; if you have out-of-line code, link with it as appropriate):: - - $ cat MyClass.h - class MyClass { - public: - MyClass(int i = -99) : m_myint(i) {} - - int GetMyInt() { return m_myint; } - void SetMyInt(int i) { m_myint = i; } - - public: - int m_myint; - }; - -Then, generate the bindings using ``genreflex`` (installed under -cppyy_backend/bin in site_packages), and compile the code:: - - $ genreflex MyClass.h - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling - -Next, make sure that the library can be found through the dynamic lookup path -(the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), -for example by adding ".". -Now you're ready to use the bindings. -Since the bindings are designed to look pythonistic, it should be -straightforward:: - - $ pypy-c - >>>> import cppyy - >>>> cppyy.load_reflection_info("libMyClassDict.so") - - >>>> myinst = cppyy.gbl.MyClass(42) - >>>> print myinst.GetMyInt() - 42 - >>>> myinst.SetMyInt(33) - >>>> print myinst.m_myint - 33 - >>>> myinst.m_myint = 77 - >>>> print myinst.GetMyInt() - 77 - >>>> help(cppyy.gbl.MyClass) # shows that normal python introspection works - -That's all there is to it! - - -Automatic class loader ----------------------- - -There is one big problem in the code above, that prevents its use in a (large -scale) production setting: the explicit loading of the reflection library. 
-Clearly, if explicit load statements such as these show up in code downstream -from the ``MyClass`` package, then that prevents the ``MyClass`` author from -repackaging or even simply renaming the dictionary library. - -The solution is to make use of an automatic class loader, so that downstream -code never has to call ``load_reflection_info()`` directly. -The class loader makes use of so-called rootmap files, which ``genreflex`` -can produce. -These files contain the list of available C++ classes and specify the library -that needs to be loaded for their use (as an aside, this listing allows for a -cross-check to see whether reflection info is generated for all classes that -you expect). -By convention, the rootmap files should be located next to the reflection info -libraries, so that they can be found through the normal shared library search -path. -They can be concatenated together, or consist of a single rootmap file per -library. -For example:: - - $ genreflex MyClass.h --rootmap=libMyClassDict.rootmap --rootmap-lib=libMyClassDict.so - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling - -where the first option (``--rootmap``) specifies the output file name, and the -second option (``--rootmap-lib``) the name of the reflection library where -``MyClass`` will live. -It is necessary to provide that name explicitly, since it is only in the -separate linking step where this name is fixed. -If the second option is not given, the library is assumed to be libMyClass.so, -a name that is derived from the name of the header file. - -With the rootmap file in place, the above example can be rerun without explicit -loading of the reflection info library:: - - $ pypy-c - >>>> import cppyy - >>>> myinst = cppyy.gbl.MyClass(42) - >>>> print myinst.GetMyInt() - 42 - >>>> # etc. ... - -As a caveat, note that the class loader is currently limited to classes only. 
- - -Advanced example ----------------- - -The following snippet of C++ is very contrived, to allow showing that such -pathological code can be handled and to show how certain features play out in -practice:: - - $ cat MyAdvanced.h - #include - - class Base1 { - public: - Base1(int i) : m_i(i) {} - virtual ~Base1() {} - int m_i; - }; - - class Base2 { - public: - Base2(double d) : m_d(d) {} - virtual ~Base2() {} - double m_d; - }; - - class C; - - class Derived : public virtual Base1, public virtual Base2 { - public: - Derived(const std::string& name, int i, double d) : Base1(i), Base2(d), m_name(name) {} - virtual C* gimeC() { return (C*)0; } - std::string m_name; - }; - - Base2* BaseFactory(const std::string& name, int i, double d) { - return new Derived(name, i, d); - } - -This code is still only in a header file, with all functions inline, for -convenience of the example. -If the implementations live in a separate source file or shared library, the -only change needed is to link those in when building the reflection library. - -If you were to run ``genreflex`` like above in the basic example, you will -find that not all classes of interest will be reflected, nor will be the -global factory function. -In particular, ``std::string`` will be missing, since it is not defined in -this header file, but in a header file that is included. -In practical terms, general classes such as ``std::string`` should live in a -core reflection set, but for the moment assume we want to have it in the -reflection library that we are building for this example. - -The ``genreflex`` script can be steered using a so-called `selection file`_ -(see "Generating Reflex Dictionaries") -which is a simple XML file specifying, either explicitly or by using a -pattern, which classes, variables, namespaces, etc. to select from the given -header file. 
-With the aid of a selection file, a large project can be easily managed: -simply ``#include`` all relevant headers into a single header file that is -handed to ``genreflex``. -In fact, if you hand multiple header files to ``genreflex``, then a selection -file is almost obligatory: without it, only classes from the last header will -be selected. -Then, apply a selection file to pick up all the relevant classes. -For our purposes, the following rather straightforward selection will do -(the name ``lcgdict`` for the root is historical, but required):: - - $ cat MyAdvanced.xml - - - - - - - -.. _selection file: https://root.cern.ch/how/how-use-reflex - -Now the reflection info can be generated and compiled:: - - $ genreflex MyAdvanced.h --selection=MyAdvanced.xml - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$CPPYYHOME/lib -lCling - -and subsequently be used from PyPy:: - - >>>> import cppyy - >>>> cppyy.load_reflection_info("libAdvExDict.so") - - >>>> d = cppyy.gbl.BaseFactory("name", 42, 3.14) - >>>> type(d) - - >>>> isinstance(d, cppyy.gbl.Base1) - True - >>>> isinstance(d, cppyy.gbl.Base2) - True - >>>> d.m_i, d.m_d - (42, 3.14) - >>>> d.m_name == "name" - True - >>>> - -Again, that's all there is to it! - -A couple of things to note, though. -If you look back at the C++ definition of the ``BaseFactory`` function, -you will see that it declares the return type to be a ``Base2``, yet the -bindings return an object of the actual type ``Derived``? -This choice is made for a couple of reasons. -First, it makes method dispatching easier: if bound objects are always their -most derived type, then it is easy to calculate any offsets, if necessary. -Second, it makes memory management easier: the combination of the type and -the memory address uniquely identifies an object. 
-That way, it can be recycled and object identity can be maintained if it is -entered as a function argument into C++ and comes back to PyPy as a return -value. -Last, but not least, casting is decidedly unpythonistic. -By always providing the most derived type known, casting becomes unnecessary. -For example, the data member of ``Base2`` is simply directly available. -Note also that the unreflected ``gimeC`` method of ``Derived`` does not -preclude its use. -It is only the ``gimeC`` method that is unusable as long as class ``C`` is -unknown to the system. - - -Features --------- - -The following is not meant to be an exhaustive list, since cppyy is still -under active development. -Furthermore, the intention is that every feature is as natural as possible on -the python side, so if you find something missing in the list below, simply -try it out. -It is not always possible to provide exact mapping between python and C++ -(active memory management is one such case), but by and large, if the use of a -feature does not strike you as obvious, it is more likely to simply be a bug. -That is a strong statement to make, but also a worthy goal. -For the C++ side of the examples, refer to this :doc:`example code `, which was -bound using:: - - $ genreflex example.h --deep --rootmap=libexampleDict.rootmap --rootmap-lib=libexampleDict.so - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include example_rflx.cpp -o libexampleDict.so -L$CPPYYHOME/lib -lCling - -* **abstract classes**: Are represented as python classes, since they are - needed to complete the inheritance hierarchies, but will raise an exception - if an attempt is made to instantiate from them. 
- Example:: - - >>>> from cppyy.gbl import AbstractClass, ConcreteClass - >>>> a = AbstractClass() - Traceback (most recent call last): - File "", line 1, in - TypeError: cannot instantiate abstract class 'AbstractClass' - >>>> issubclass(ConcreteClass, AbstractClass) - True - >>>> c = ConcreteClass() - >>>> isinstance(c, AbstractClass) - True - >>>> - -* **arrays**: Supported for builtin data types only, as used from module - ``array``. - Out-of-bounds checking is limited to those cases where the size is known at - compile time (and hence part of the reflection info). - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> from array import array - >>>> c = ConcreteClass() - >>>> c.array_method(array('d', [1., 2., 3., 4.]), 4) - 1 2 3 4 - >>>> - -* **builtin data types**: Map onto the expected equivalent python types, with - the caveat that there may be size differences, and thus it is possible that - exceptions are raised if an overflow is detected. - -* **casting**: Is supposed to be unnecessary. - Object pointer returns from functions provide the most derived class known - in the hierarchy of the object being returned. - This is important to preserve object identity as well as to make casting, - a pure C++ feature after all, superfluous. - Example:: - - >>>> from cppyy.gbl import AbstractClass, ConcreteClass - >>>> c = ConcreteClass() - >>>> ConcreteClass.show_autocast.__doc__ - 'AbstractClass* ConcreteClass::show_autocast()' - >>>> d = c.show_autocast() - >>>> type(d) - - >>>> - - However, if need be, you can perform C++-style reinterpret_casts (i.e. - without taking offsets into account), by taking and rebinding the address - of an object:: - - >>>> from cppyy import addressof, bind_object - >>>> e = bind_object(addressof(d), AbstractClass) - >>>> type(e) - - >>>> - -* **classes and structs**: Get mapped onto python classes, where they can be - instantiated as expected. 
- If classes are inner classes or live in a namespace, their naming and - location will reflect that. - Example:: - - >>>> from cppyy.gbl import ConcreteClass, Namespace - >>>> ConcreteClass == Namespace.ConcreteClass - False - >>>> n = Namespace.ConcreteClass.NestedClass() - >>>> type(n) - - >>>> - -* **data members**: Public data members are represented as python properties - and provide read and write access on instances as expected. - Private and protected data members are not accessible. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> c = ConcreteClass() - >>>> c.m_int - 42 - >>>> - -* **default arguments**: C++ default arguments work as expected, but python - keywords are not supported. - It is technically possible to support keywords, but for the C++ interface, - the formal argument names have no meaning and are not considered part of the - API, hence it is not a good idea to use keywords. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> c = ConcreteClass() # uses default argument - >>>> c.m_int - 42 - >>>> c = ConcreteClass(13) - >>>> c.m_int - 13 - >>>> - -* **doc strings**: The doc string of a method or function contains the C++ - arguments and return types of all overloads of that name, as applicable. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> print ConcreteClass.array_method.__doc__ - void ConcreteClass::array_method(int*, int) - void ConcreteClass::array_method(double*, int) - >>>> - -* **enums**: Are translated as ints with no further checking. - -* **functions**: Work as expected and live in their appropriate namespace - (which can be the global one, ``cppyy.gbl``). - -* **inheritance**: All combinations of inheritance on the C++ (single, - multiple, virtual) are supported in the binding. - However, new python classes can only use single inheritance from a bound C++ - class. - Multiple inheritance would introduce two "this" pointers in the binding. - This is a current, not a fundamental, limitation. 
- The C++ side will not see any overridden methods on the python side, as - cross-inheritance is planned but not yet supported. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> help(ConcreteClass) - Help on class ConcreteClass in module __main__: - - class ConcreteClass(AbstractClass) - | Method resolution order: - | ConcreteClass - | AbstractClass - | cppyy.CPPObject - | __builtin__.CPPInstance - | __builtin__.object - | - | Methods defined here: - | - | ConcreteClass(self, *args) - | ConcreteClass::ConcreteClass(const ConcreteClass&) - | ConcreteClass::ConcreteClass(int) - | ConcreteClass::ConcreteClass() - | - etc. .... - -* **memory**: C++ instances created by calling their constructor from python - are owned by python. - You can check/change the ownership with the _python_owns flag that every - bound instance carries. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> c = ConcreteClass() - >>>> c._python_owns # True: object created in Python - True - >>>> - -* **methods**: Are represented as python methods and work as expected. - They are first class objects and can be bound to an instance. - Virtual C++ methods work as expected. - To select a specific virtual method, do like with normal python classes - that override methods: select it from the class that you need, rather than - calling the method on the instance. - To select a specific overload, use the __dispatch__ special function, which - takes the name of the desired method and its signature (which can be - obtained from the doc string) as arguments. - -* **namespaces**: Are represented as python classes. - Namespaces are more open-ended than classes, so sometimes initial access may - result in updates as data and functions are looked up and constructed - lazily. - Thus the result of ``dir()`` on a namespace shows the classes available, - even if they may not have been created yet. - It does not show classes that could potentially be loaded by the class - loader. 
- Once created, namespaces are registered as modules, to allow importing from - them. - Namespace currently do not work with the class loader. - Fixing these bootstrap problems is on the TODO list. - The global namespace is ``cppyy.gbl``. - -* **NULL**: Is represented as ``cppyy.gbl.nullptr``. - In C++11, the keyword ``nullptr`` is used to represent ``NULL``. - For clarity of intent, it is recommended to use this instead of ``None`` - (or the integer ``0``, which can serve in some cases), as ``None`` is better - understood as ``void`` in C++. - -* **operator conversions**: If defined in the C++ class and a python - equivalent exists (i.e. all builtin integer and floating point types, as well - as ``bool``), it will map onto that python conversion. - Note that ``char*`` is mapped onto ``__str__``. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> print ConcreteClass() - Hello operator const char*! - >>>> - -* **operator overloads**: If defined in the C++ class and if a python - equivalent is available (not always the case, think e.g. of ``operator||``), - then they work as expected. - Special care needs to be taken for global operator overloads in C++: first, - make sure that they are actually reflected, especially for the global - overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterate over a vector). - Second, make sure that reflection info is loaded in the proper order. - I.e. that these global overloads are available before use. - -* **pointers**: For builtin data types, see arrays. - For objects, a pointer to an object and an object looks the same, unless - the pointer is a data member. - In that case, assigning to the data member will cause a copy of the pointer - and care should be taken about the object's life time. - If a pointer is a global variable, the C++ side can replace the underlying - object and the python side will immediately reflect that. 
- -* **PyObject***: Arguments and return types of ``PyObject*`` can be used, and - passed on to CPython API calls. - Since these CPython-like objects need to be created and tracked (this all - happens through ``cpyext``) this interface is not particularly fast. - -* **static data members**: Are represented as python property objects on the - class and the meta-class. - Both read and write access is as expected. - -* **static methods**: Are represented as python's ``staticmethod`` objects - and can be called both from the class as well as from instances. - -* **strings**: The std::string class is considered a builtin C++ type and - mixes quite well with python's str. - Python's str can be passed where a ``const char*`` is expected, and an str - will be returned if the return type is ``const char*``. - -* **templated classes**: Are represented in a meta-class style in python. - This may look a little bit confusing, but conceptually is rather natural. - For example, given the class ``std::vector``, the meta-class part would - be ``std.vector``. - Then, to get the instantiation on ``int``, do ``std.vector(int)`` and to - create an instance of that class, do ``std.vector(int)()``:: - - >>>> import cppyy - >>>> cppyy.load_reflection_info('libexampleDict.so') - >>>> cppyy.gbl.std.vector # template metatype - - >>>> cppyy.gbl.std.vector(int) # instantiates template -> class - '> - >>>> cppyy.gbl.std.vector(int)() # instantiates class -> object - <__main__.std::vector object at 0x00007fe480ba4bc0> - >>>> - - Note that templates can be build up by handing actual types to the class - instantiation (as done in this vector example), or by passing in the list of - template arguments as a string. - The former is a lot easier to work with if you have template instantiations - using classes that themselves are templates in the arguments (think e.g a - vector of vectors). 
- All template classes must already exist in the loaded reflection info, they - do not work (yet) with the class loader. - - For compatibility with other bindings generators, use of square brackets - instead of parenthesis to instantiate templates is supported as well. - -* **templated functions**: Automatically participate in overloading and are - used in the same way as other global functions. - -* **templated methods**: For now, require an explicit selection of the - template parameters. - This will be changed to allow them to participate in overloads as expected. - -* **typedefs**: Are simple python references to the actual classes to which - they refer. - -* **unary operators**: Are supported if a python equivalent exists, and if the - operator is defined in the C++ class. - -You can always find more detailed examples and see the full of supported -features by looking at the tests in pypy/module/cppyy/test. - -If a feature or reflection info is missing, this is supposed to be handled -gracefully. -In fact, there are unit tests explicitly for this purpose (even as their use -becomes less interesting over time, as the number of missing features -decreases). -Only when a missing feature is used, should there be an exception. -For example, if no reflection info is available for a return type, then a -class that has a method with that return type can still be used. -Only that one specific method can not be used. - - -Templates ---------- - -Templates can be automatically instantiated, assuming the appropriate header -files have been loaded or are accessible to the class loader. -This is the case for example for all of STL. 
-For example:: - - $ cat MyTemplate.h - #include - - class MyClass { - public: - MyClass(int i = -99) : m_i(i) {} - MyClass(const MyClass& s) : m_i(s.m_i) {} - MyClass& operator=(const MyClass& s) { m_i = s.m_i; return *this; } - ~MyClass() {} - int m_i; - }; - -Run the normal ``genreflex`` and compilation steps:: - - $ genreflex MyTemplate.h --selection=MyTemplate.xml - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$CPPYYHOME/lib -lCling - -Subsequent use should be as expected. -Note the meta-class style of "instantiating" the template:: - - >>>> import cppyy - >>>> cppyy.load_reflection_info("libTemplateDict.so") - >>>> std = cppyy.gbl.std - >>>> MyClass = cppyy.gbl.MyClass - >>>> v = std.vector(MyClass)() - >>>> v += [MyClass(1), MyClass(2), MyClass(3)] - >>>> for m in v: - .... print m.m_i, - .... - 1 2 3 - >>>> - -The arguments to the template instantiation can either be a string with the -full list of arguments, or the explicit classes. -The latter makes for easier code writing if the classes passed to the -instantiation are themselves templates. - - -The fast lane -------------- - -By default, cppyy will use direct function pointers through `CFFI`_ whenever -possible. If this causes problems for you, you can disable it by setting the -CPPYY_DISABLE_FASTPATH environment variable. - -.. _CFFI: https://cffi.readthedocs.io/en/latest/ - - -CPython -------- - -Most of the ideas in cppyy come originally from the `PyROOT`_ project, which -contains a CPython-based cppyy.py module (with similar dependencies as the -one that comes with PyPy). -A standalone pip-installable version is planned, but for now you can install -ROOT through your favorite distribution installer (available in the science -section). - -.. _PyROOT: https://root.cern.ch/pyroot - -There are a couple of minor differences between the two versions of cppyy -(the CPython version has a few more features). 
-Work is on-going to integrate the nightly tests of both to make sure their -feature sets are equalized. - - -Python3 -------- - -The CPython version of cppyy supports Python3, assuming your packager has -build the backend for it. -The cppyy module has not been tested with the `Py3k`_ version of PyPy. -Note that the generated reflection information (from ``genreflex``) is fully -independent of Python, and does not need to be rebuild when switching versions -or interpreters. - -.. _Py3k: https://bitbucket.org/pypy/pypy/src/py3k - - -.. toctree:: - :hidden: - - cppyy_example diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -61,29 +61,23 @@ .. _libffi: http://sourceware.org/libffi/ -Cling and cppyy ---------------- +cppyy +----- -The builtin :doc:`cppyy ` module uses reflection information, provided by -`Cling`_ (which needs to be `installed separately`_), of C/C++ code to -automatically generate bindings at runtime. -In Python, classes and functions are always runtime structures, so when they -are generated matters not for performance. -However, if the backend itself is capable of dynamic behavior, it is a much -better functional match, allowing tighter integration and more natural -language mappings. +For C++, `cppyy`_ is an automated bindings generator available for both +PyPy and CPython. +``cppyy`` relies on declarations from C++ header files to dynamically +construct Python equivalent classes, functions, variables, etc. +It is designed for use by large scale programs and supports modern C++. +With PyPy, it leverages the built-in ``_cppyy`` module, allowing the JIT to +remove most of the cross-language overhead. -The :doc:`cppyy ` module is written in RPython, thus PyPy's JIT is able to remove -most cross-language call overhead. +To install, run ``pip install cppyy``. +Further details are available in the `full documentation`_. -:doc:Full details are `available here `. +.. 
_cppyy: http://cppyy.readthedocs.org/ +.. _`full documentation`: http://cppyy.readthedocs.org/ -.. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend -.. _Cling: https://root.cern.ch/cling - -.. toctree:: - - cppyy RPython Mixed Modules --------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -25,3 +25,8 @@ .. branch: cpyext-hash_notimpl If ``tp_hash`` is ``PyObject_HashNotImplemented``, set ``obj.__dict__['__hash__']`` to None + +.. branch: cppyy-packaging + +Renaming of ``cppyy`` to ``_cppyy``. +The former is now an external package installable with ``pip install cppyy``. diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/_cppyy/__init__.py rename from pypy/module/cppyy/__init__.py rename to pypy/module/_cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/_cppyy/__init__.py @@ -33,11 +33,11 @@ # pythonization functions may be written in RPython, but the interp2app # code generation is not, so give it a chance to run now - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi capi.register_pythonizations(space) def startup(self, space): - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi capi.verify_backend(space) # may raise ImportError space.call_method(self, '_init_pythonify') diff --git a/pypy/module/cppyy/backend/create_cppyy_package.py b/pypy/module/_cppyy/backend/create_cppyy_package.py rename from pypy/module/cppyy/backend/create_cppyy_package.py rename to pypy/module/_cppyy/backend/create_cppyy_package.py diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/_cppyy/bench/Makefile rename from pypy/module/cppyy/bench/Makefile rename to pypy/module/_cppyy/bench/Makefile diff --git a/pypy/module/cppyy/bench/bench02.cxx b/pypy/module/_cppyy/bench/bench02.cxx rename from pypy/module/cppyy/bench/bench02.cxx rename to pypy/module/_cppyy/bench/bench02.cxx diff --git 
a/pypy/module/cppyy/bench/bench02.h b/pypy/module/_cppyy/bench/bench02.h rename from pypy/module/cppyy/bench/bench02.h rename to pypy/module/_cppyy/bench/bench02.h diff --git a/pypy/module/cppyy/bench/bench02.xml b/pypy/module/_cppyy/bench/bench02.xml rename from pypy/module/cppyy/bench/bench02.xml rename to pypy/module/_cppyy/bench/bench02.xml diff --git a/pypy/module/cppyy/bench/hsimple.C b/pypy/module/_cppyy/bench/hsimple.C rename from pypy/module/cppyy/bench/hsimple.C rename to pypy/module/_cppyy/bench/hsimple.C diff --git a/pypy/module/cppyy/bench/hsimple.py b/pypy/module/_cppyy/bench/hsimple.py rename from pypy/module/cppyy/bench/hsimple.py rename to pypy/module/_cppyy/bench/hsimple.py diff --git a/pypy/module/cppyy/bench/hsimple_rflx.py b/pypy/module/_cppyy/bench/hsimple_rflx.py rename from pypy/module/cppyy/bench/hsimple_rflx.py rename to pypy/module/_cppyy/bench/hsimple_rflx.py diff --git a/pypy/module/cppyy/capi/__init__.py b/pypy/module/_cppyy/capi/__init__.py rename from pypy/module/cppyy/capi/__init__.py rename to pypy/module/_cppyy/capi/__init__.py --- a/pypy/module/cppyy/capi/__init__.py +++ b/pypy/module/_cppyy/capi/__init__.py @@ -9,10 +9,10 @@ # the selection of the desired backend (default is Reflex). 
# choose C-API access method: -from pypy.module.cppyy.capi.loadable_capi import * -#from pypy.module.cppyy.capi.builtin_capi import * +from pypy.module._cppyy.capi.loadable_capi import * +#from pypy.module._cppyy.capi.builtin_capi import * -from pypy.module.cppyy.capi.capi_types import C_OBJECT,\ +from pypy.module._cppyy.capi.capi_types import C_OBJECT,\ C_NULL_TYPE, C_NULL_OBJECT def direct_ptradd(ptr, offset): diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/_cppyy/capi/builtin_capi.py rename from pypy/module/cppyy/capi/builtin_capi.py rename to pypy/module/_cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/_cppyy/capi/builtin_capi.py @@ -4,7 +4,7 @@ import cling_capi as backend -from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ +from pypy.module._cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR identify = backend.identify diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/_cppyy/capi/capi_types.py rename from pypy/module/cppyy/capi/capi_types.py rename to pypy/module/_cppyy/capi/capi_types.py diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/_cppyy/capi/cling_capi.py rename from pypy/module/cppyy/capi/cling_capi.py rename to pypy/module/_cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/_cppyy/capi/cling_capi.py @@ -11,7 +11,7 @@ from rpython.rlib import jit, libffi, rdynload from pypy.module._rawffi.array import W_ArrayInstance -from pypy.module.cppyy.capi.capi_types import C_OBJECT +from pypy.module._cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -99,7 +99,7 @@ def stdstring_c_str(space, w_self): """Return a python string taking into account \0""" - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppstr = space.interp_w(interp_cppyy.W_CPPInstance, 
w_self, can_be_None=False) return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) @@ -112,12 +112,12 @@ W_AbstractSeqIterObject.__init__(self, w_vector) # TODO: this should live in rpythonize.py or something so that the # imports can move to the top w/o getting circles - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy assert isinstance(w_vector, interp_cppyy.W_CPPInstance) vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) self.overload = vector.cppclass.get_overload("__getitem__") - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) @@ -131,7 +131,7 @@ self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) - from pypy.module.cppyy import converter + from pypy.module._cppyy import converter self.converter = converter.get_converter(space, v_type, '') self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) self.stride = v_size @@ -143,7 +143,7 @@ self.w_seq = None raise OperationError(space.w_StopIteration, space.w_None) try: - from pypy.module.cppyy import capi # TODO: refector + from pypy.module._cppyy import capi # TODO: refector offset = capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) except OperationError as e: @@ -186,7 +186,7 @@ _method_alias(space, w_pycppclass, "__str__", "c_str") if "vector" in name[:11]: # len('std::vector') == 11 - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi v_type = capi.c_stdvector_valuetype(space, name) if v_type: space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/_cppyy/capi/loadable_capi.py rename from pypy/module/cppyy/capi/loadable_capi.py rename to 
pypy/module/_cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/_cppyy/capi/loadable_capi.py @@ -9,9 +9,9 @@ from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc from pypy.module._cffi_backend import newtype -from pypy.module.cppyy import ffitypes +from pypy.module._cppyy import ffitypes -from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ +from pypy.module._cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR @@ -599,7 +599,7 @@ def stdstring_c_str(space, w_self): """Return a python string taking into account \0""" - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) return space.newtext(c_stdstring2charp(space, cppstr._rawobject)) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/_cppyy/converter.py rename from pypy/module/cppyy/converter.py rename to pypy/module/_cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/_cppyy/converter.py @@ -9,7 +9,7 @@ from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance -from pypy.module.cppyy import helper, capi, ffitypes +from pypy.module._cppyy import helper, capi, ffitypes # Converter objects are used to translate between RPython and C++. They are # defined by the type name for which they provide conversion. 
Uses are for @@ -22,7 +22,7 @@ def get_rawobject(space, w_obj): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) if cppinstance: rawobject = cppinstance.get_rawobject() @@ -31,14 +31,14 @@ return capi.C_NULL_OBJECT def set_rawobject(space, w_obj, address): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) if cppinstance: assert lltype.typeOf(cppinstance._rawobject) == capi.C_OBJECT cppinstance._rawobject = rffi.cast(capi.C_OBJECT, address) def get_rawobject_nonnull(space, w_obj): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) if cppinstance: cppinstance._nullcheck() @@ -56,7 +56,7 @@ except Exception: pass # None or nullptr - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return space.is_true(space.is_(w_obj, space.w_None)) or \ space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space))) @@ -104,18 +104,18 @@ "no converter available for '%s'", self.name) def cffi_type(self, space): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible def convert_argument(self, space, w_obj, address, call_local): self._is_abstract(space) def convert_argument_libffi(self, space, w_obj, address, call_local): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible def default_argument_libffi(self, space, address): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import 
FastCallNotPossible raise FastCallNotPossible def from_memory(self, space, w_obj, w_pycppclass, offset): @@ -362,7 +362,7 @@ return state.c_voidp def convert_argument_libffi(self, space, w_obj, address, call_local): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible class DoubleConverter(ffitypes.typeid(rffi.DOUBLE), FloatTypeConverterMixin, TypeConverter): @@ -442,7 +442,7 @@ address = self._get_raw_address(space, w_obj, offset) ptrval = rffi.cast(rffi.ULONG, rffi.cast(rffi.VOIDPP, address)[0]) if ptrval == 0: - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return interp_cppyy.get_nullptr(space) arr = space.interp_w(W_Array, letter2tp(space, 'P')) return arr.fromaddress(space, ptrval, sys.maxint) @@ -488,12 +488,12 @@ typecode = 'V' def __init__(self, space, cppclass): - from pypy.module.cppyy.interp_cppyy import W_CPPClass + from pypy.module._cppyy.interp_cppyy import W_CPPClass assert isinstance(cppclass, W_CPPClass) self.cppclass = cppclass def _unwrap_object(self, space, w_obj): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance if isinstance(w_obj, W_CPPInstance): if capi.c_is_subtype(space, w_obj.cppclass, self.cppclass): rawobject = w_obj.get_rawobject() @@ -521,12 +521,12 @@ class InstanceConverter(InstanceRefConverter): def convert_argument_libffi(self, space, w_obj, address, call_local): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible # TODO: by-value is a jit_libffi special case def from_memory(self, space, w_obj, w_pycppclass, offset): address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return 
interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False) def to_memory(self, space, w_obj, w_value, offset): @@ -547,7 +547,7 @@ def from_memory(self, space, w_obj, w_pycppclass, offset): address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False) def to_memory(self, space, w_obj, w_value, offset): @@ -570,30 +570,30 @@ def convert_argument_libffi(self, space, w_obj, address, call_local): # TODO: finalize_call not yet called for fast call (see interp_cppyy.py) - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible def finalize_call(self, space, w_obj, call_local): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance assert isinstance(w_obj, W_CPPInstance) r = rffi.cast(rffi.VOIDPP, call_local) w_obj._rawobject = rffi.cast(capi.C_OBJECT, r[0]) def from_memory(self, space, w_obj, w_pycppclass, offset): address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False, is_ref=True) class StdStringConverter(InstanceConverter): def __init__(self, space, extra): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) InstanceConverter.__init__(self, space, cppclass) def _unwrap_object(self, space, w_obj): - from pypy.module.cppyy.interp_cppyy import W_CPPInstance + from pypy.module._cppyy.interp_cppyy import W_CPPInstance if isinstance(w_obj, W_CPPInstance): arg = InstanceConverter._unwrap_object(self, space, w_obj) 
return capi.c_stdstring2stdstring(space, arg) @@ -604,7 +604,7 @@ try: address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) assign = self.cppclass.get_overload("__assign__") - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy assign.call( interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False), [w_value]) except Exception: @@ -619,7 +619,7 @@ typecode = 'V' def __init__(self, space, extra): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) InstancePtrConverter.__init__(self, space, cppclass) @@ -642,7 +642,7 @@ def convert_argument_libffi(self, space, w_obj, address, call_local): # TODO: free_argument not yet called for fast call (see interp_cppyy.py) - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible # proposed implementation: @@ -709,11 +709,11 @@ # 3) TODO: accept ref as pointer # 4) generalized cases (covers basically all user classes) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, clean_name) if cppclass: # type check for the benefit of the annotator - from pypy.module.cppyy.interp_cppyy import W_CPPClass + from pypy.module._cppyy.interp_cppyy import W_CPPClass cppclass = space.interp_w(W_CPPClass, cppclass, can_be_None=False) if compound == "*": return InstancePtrConverter(space, cppclass) @@ -874,12 +874,12 @@ class TStringConverter(InstanceConverter): def __init__(self, space, extra): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, "TString") InstanceConverter.__init__(self, space, cppclass) def _unwrap_object(self, space, w_obj): - from pypy.module.cppyy import interp_cppyy + from 
pypy.module._cppyy import interp_cppyy if isinstance(w_obj, interp_cppyy.W_CPPInstance): arg = InstanceConverter._unwrap_object(self, space, w_obj) return capi.backend.c_TString2TString(space, arg) diff --git a/pypy/module/cppyy/executor.py b/pypy/module/_cppyy/executor.py rename from pypy/module/cppyy/executor.py rename to pypy/module/_cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/_cppyy/executor.py @@ -8,7 +8,7 @@ from pypy.module._rawffi.interp_rawffi import unpack_simple_shape from pypy.module._rawffi.array import W_Array, W_ArrayInstance -from pypy.module.cppyy import helper, capi, ffitypes +from pypy.module._cppyy import helper, capi, ffitypes # Executor objects are used to dispatch C++ methods. They are defined by their # return type only: arguments are converted by Converter objects, and Executors @@ -31,7 +31,7 @@ pass def cffi_type(self, space): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible def execute(self, space, cppmethod, cppthis, num_args, args): @@ -39,7 +39,7 @@ "return type not available or supported") def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible @@ -58,7 +58,7 @@ ptrval = rffi.cast(rffi.ULONG, lresult) arr = space.interp_w(W_Array, unpack_simple_shape(space, space.newtext(self.typecode))) if ptrval == 0: - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy return interp_cppyy.get_nullptr(space) return arr.fromaddress(space, ptrval, sys.maxint) @@ -138,7 +138,7 @@ class ConstructorExecutor(FunctionExecutor): def execute(self, space, cppmethod, cpptype, num_args, args): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy newthis = capi.c_constructor(space, cppmethod, 
cpptype, num_args, args) assert lltype.typeOf(newthis) == capi.C_OBJECT return space.newlong(rffi.cast(rffi.LONG, newthis)) # really want ptrdiff_t here @@ -156,7 +156,7 @@ return state.c_voidp def execute(self, space, cppmethod, cppthis, num_args, args): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy long_result = capi.c_call_l(space, cppmethod, cppthis, num_args, args) ptr_result = rffi.cast(capi.C_OBJECT, long_result) pyres = interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) @@ -165,34 +165,34 @@ def execute_libffi(self, space, cif_descr, funcaddr, buffer): jit_libffi.jit_ffi_call(cif_descr, funcaddr, buffer) result = rffi.ptradd(buffer, cif_descr.exchange_result) - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy ptr_result = rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, result)[0]) return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) class InstancePtrPtrExecutor(InstancePtrExecutor): def execute(self, space, cppmethod, cppthis, num_args, args): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy voidp_result = capi.c_call_r(space, cppmethod, cppthis, num_args, args) ref_address = rffi.cast(rffi.VOIDPP, voidp_result) ptr_result = rffi.cast(capi.C_OBJECT, ref_address[0]) return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible class InstanceExecutor(InstancePtrExecutor): def execute(self, space, cppmethod, cppthis, num_args, args): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy long_result = capi.c_call_o(space, cppmethod, cppthis, num_args, args, self.cppclass) ptr_result = rffi.cast(capi.C_OBJECT, long_result) return 
interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass, do_cast=False, python_owns=True, fresh=True) def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible @@ -205,13 +205,13 @@ return space.newbytes(pystr) def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible class StdStringRefExecutor(InstancePtrExecutor): def __init__(self, space, cppclass): - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) InstancePtrExecutor.__init__(self, space, cppclass) @@ -277,11 +277,11 @@ pass # 3) types/classes, either by ref/ptr or by value - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppclass = interp_cppyy.scope_byname(space, clean_name) if cppclass: # type check for the benefit of the annotator - from pypy.module.cppyy.interp_cppyy import W_CPPClass + from pypy.module._cppyy.interp_cppyy import W_CPPClass cppclass = space.interp_w(W_CPPClass, cppclass, can_be_None=False) if compound == '': return InstanceExecutor(space, cppclass) diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/_cppyy/ffitypes.py rename from pypy/module/cppyy/ffitypes.py rename to pypy/module/_cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/_cppyy/ffitypes.py @@ -82,12 +82,13 @@ value = rffi.cast(rffi.CHAR, space.c_int_w(w_value)) else: - value = space.bytes_w(w_value) + value = space.text_w(w_value) + if len(value) != 1: + raise oefmt(space.w_ValueError, + "char expected, got string of size %d", len(value)) + value = rffi.cast(rffi.CHAR, value[0]) - if len(value) != 1: - raise oefmt(space.w_ValueError, - 
"char expected, got string of size %d", len(value)) - return value[0] # turn it into a "char" to the annotator + return value # turn it into a "char" to the annotator def cffi_type(self, space): state = space.fromcache(State) diff --git a/pypy/module/cppyy/genreflex-methptrgetter.patch b/pypy/module/_cppyy/genreflex-methptrgetter.patch rename from pypy/module/cppyy/genreflex-methptrgetter.patch rename to pypy/module/_cppyy/genreflex-methptrgetter.patch diff --git a/pypy/module/cppyy/helper.py b/pypy/module/_cppyy/helper.py rename from pypy/module/cppyy/helper.py rename to pypy/module/_cppyy/helper.py --- a/pypy/module/cppyy/helper.py +++ b/pypy/module/_cppyy/helper.py @@ -64,7 +64,7 @@ _operator_mappings = {} def map_operator_name(space, cppname, nargs, result_type): - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi if cppname[0:8] == "operator": op = cppname[8:].strip(' ') diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/_cppyy/include/capi.h rename from pypy/module/cppyy/include/capi.h rename to pypy/module/_cppyy/include/capi.h diff --git a/pypy/module/cppyy/include/clingcwrapper.h b/pypy/module/_cppyy/include/clingcwrapper.h rename from pypy/module/cppyy/include/clingcwrapper.h rename to pypy/module/_cppyy/include/clingcwrapper.h diff --git a/pypy/module/cppyy/include/cpp_cppyy.h b/pypy/module/_cppyy/include/cpp_cppyy.h rename from pypy/module/cppyy/include/cpp_cppyy.h rename to pypy/module/_cppyy/include/cpp_cppyy.h diff --git a/pypy/module/cppyy/include/cppyy.h b/pypy/module/_cppyy/include/cppyy.h rename from pypy/module/cppyy/include/cppyy.h rename to pypy/module/_cppyy/include/cppyy.h diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/_cppyy/interp_cppyy.py rename from pypy/module/cppyy/interp_cppyy.py rename to pypy/module/_cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/_cppyy/interp_cppyy.py @@ -1,4 +1,4 @@ -import pypy.module.cppyy.capi as capi +import pypy.module._cppyy.capi 
as capi from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -12,7 +12,7 @@ from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here from pypy.module._cffi_backend import ctypefunc -from pypy.module.cppyy import converter, executor, ffitypes, helper +from pypy.module._cppyy import converter, executor, ffitypes, helper class FastCallNotPossible(Exception): diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/_cppyy/pythonify.py rename from pypy/module/cppyy/pythonify.py rename to pypy/module/_cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/_cppyy/pythonify.py @@ -1,5 +1,5 @@ # NOT_RPYTHON -# do not load cppyy here, see _init_pythonify() +# do not load _cppyy here, see _init_pythonify() import types import sys @@ -35,8 +35,8 @@ def _arg_to_str(self, arg): if arg == str: - import cppyy - arg = cppyy._std_string_name() + import _cppyy + arg = _cppyy._std_string_name() elif type(arg) != str: arg = arg.__name__ return arg @@ -99,8 +99,8 @@ else: d = dict() def cpp_proxy_loader(cls): - import cppyy - cpp_proxy = cppyy._scope_byname(cls.__name__ != '::' and cls.__name__ or '') + import _cppyy + cpp_proxy = _cppyy._scope_byname(cls.__name__ != '::' and cls.__name__ or '') del cls.__class__._cpp_proxy cls._cpp_proxy = cpp_proxy return cpp_proxy @@ -126,7 +126,7 @@ setattr(metans, dm_name, cppdm) modname = pycppns.__name__.replace('::', '.') - sys.modules['cppyy.gbl.'+modname] = pycppns + sys.modules['_cppyy.gbl.'+modname] = pycppns return pycppns def _drop_cycles(bases): @@ -141,8 +141,8 @@ def make_new(class_name): def __new__(cls, *args): # create a place-holder only as there may be a derived class defined - import cppyy - instance = cppyy.bind_object(0, class_name, True) + import _cppyy + instance = _cppyy.bind_object(0, class_name, True) if not instance.__class__ is cls: instance.__class__ = cls # happens for derived class return instance @@ -202,8 
+202,8 @@ # the call to register will add back-end specific pythonizations and thus # needs to run first, so that the generic pythonizations can use them - import cppyy - cppyy._register_class(pycppclass) + import _cppyy + _cppyy._register_class(pycppclass) _pythonize(pycppclass) return pycppclass @@ -212,18 +212,18 @@ def get_pycppitem(scope, name): - import cppyy + import _cppyy # resolve typedefs/aliases full_name = (scope == gbl) and name or (scope.__name__+'::'+name) - true_name = cppyy._resolve_name(full_name) + true_name = _cppyy._resolve_name(full_name) if true_name != full_name: return get_pycppclass(true_name) pycppitem = None # classes - cppitem = cppyy._scope_byname(true_name) + cppitem = _cppyy._scope_byname(true_name) if cppitem: if cppitem.is_namespace(): pycppitem = make_cppnamespace(scope, true_name, cppitem) @@ -233,7 +233,7 @@ # templates if not cppitem: - cppitem = cppyy._template_byname(true_name) + cppitem = _cppyy._template_byname(true_name) if cppitem: pycppitem = make_cpptemplatetype(scope, name) setattr(scope, name, pycppitem) @@ -323,7 +323,7 @@ # general note: use 'in pyclass.__dict__' rather than 'hasattr' to prevent # adding pythonizations multiple times in derived classes - import cppyy + import _cppyy # map __eq__/__ne__ through a comparison to None if '__eq__' in pyclass.__dict__: @@ -362,8 +362,8 @@ # also the fallback on the indexed __getitem__, but that is slower) if not 'vector' in pyclass.__name__[:11] and \ ('begin' in pyclass.__dict__ and 'end' in pyclass.__dict__): - if cppyy._scope_byname(pyclass.__name__+'::iterator') or \ - cppyy._scope_byname(pyclass.__name__+'::const_iterator'): + if _cppyy._scope_byname(pyclass.__name__+'::iterator') or \ + _cppyy._scope_byname(pyclass.__name__+'::const_iterator'): def __iter__(self): i = self.begin() while i != self.end(): @@ -383,7 +383,7 @@ pyclass.__getitem__ = python_style_getitem # string comparisons - if pyclass.__name__ == cppyy._std_string_name(): + if pyclass.__name__ == 
_cppyy._std_string_name(): def eq(self, other): if type(other) == pyclass: return self.c_str() == other.c_str() @@ -410,29 +410,29 @@ try: return _loaded_dictionaries[name] except KeyError: - import cppyy - lib = cppyy._load_dictionary(name) + import _cppyy + lib = _cppyy._load_dictionary(name) _loaded_dictionaries[name] = lib return lib def _init_pythonify(): - # cppyy should not be loaded at the module level, as that will trigger a - # call to space.getbuiltinmodule(), which will cause cppyy to be loaded - # at pypy-c startup, rather than on the "import cppyy" statement - import cppyy + # _cppyy should not be loaded at the module level, as that will trigger a + # call to space.getbuiltinmodule(), which will cause _cppyy to be loaded + # at pypy-c startup, rather than on the "import _cppyy" statement + import _cppyy # root of all proxy classes: CPPInstance in pythonify exists to combine the # CPPClass meta class with the interp-level CPPInstanceBase global CPPInstance - class CPPInstance(cppyy.CPPInstanceBase): + class CPPInstance(_cppyy.CPPInstanceBase): __metaclass__ = CPPClass pass # class generator callback - cppyy._set_class_generator(clgen_callback) + _cppyy._set_class_generator(clgen_callback) # function generator callback - cppyy._set_function_generator(fngen_callback) + _cppyy._set_function_generator(fngen_callback) # user interface objects (note the two-step of not calling scope_byname here: # creation of global functions may cause the creation of classes in the global @@ -450,14 +450,14 @@ setattr(gbl, 'internal_enum_type_t', int) # install nullptr as a unique reference - setattr(gbl, 'nullptr', cppyy._get_nullptr()) + setattr(gbl, 'nullptr', _cppyy._get_nullptr()) # install for user access - cppyy.gbl = gbl + _cppyy.gbl = gbl # install as modules to allow importing from - sys.modules['cppyy.gbl'] = gbl - sys.modules['cppyy.gbl.std'] = gbl.std + sys.modules['_cppyy.gbl'] = gbl + sys.modules['_cppyy.gbl.std'] = gbl.std # user-defined pythonizations 
interface _pythonizations = {} diff --git a/pypy/module/cppyy/src/callcontext.h b/pypy/module/_cppyy/src/callcontext.h rename from pypy/module/cppyy/src/callcontext.h rename to pypy/module/_cppyy/src/callcontext.h diff --git a/pypy/module/cppyy/src/clingcwrapper.cxx b/pypy/module/_cppyy/src/clingcwrapper.cxx rename from pypy/module/cppyy/src/clingcwrapper.cxx rename to pypy/module/_cppyy/src/clingcwrapper.cxx diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/_cppyy/src/dummy_backend.cxx rename from pypy/module/cppyy/src/dummy_backend.cxx rename to pypy/module/_cppyy/src/dummy_backend.cxx diff --git a/pypy/module/cppyy/test/Makefile b/pypy/module/_cppyy/test/Makefile rename from pypy/module/cppyy/test/Makefile rename to pypy/module/_cppyy/test/Makefile diff --git a/pypy/module/cppyy/test/__init__.py b/pypy/module/_cppyy/test/__init__.py rename from pypy/module/cppyy/test/__init__.py rename to pypy/module/_cppyy/test/__init__.py diff --git a/pypy/module/cppyy/test/advancedcpp.cxx b/pypy/module/_cppyy/test/advancedcpp.cxx rename from pypy/module/cppyy/test/advancedcpp.cxx rename to pypy/module/_cppyy/test/advancedcpp.cxx diff --git a/pypy/module/cppyy/test/advancedcpp.h b/pypy/module/_cppyy/test/advancedcpp.h rename from pypy/module/cppyy/test/advancedcpp.h rename to pypy/module/_cppyy/test/advancedcpp.h diff --git a/pypy/module/cppyy/test/advancedcpp.xml b/pypy/module/_cppyy/test/advancedcpp.xml rename from pypy/module/cppyy/test/advancedcpp.xml rename to pypy/module/_cppyy/test/advancedcpp.xml diff --git a/pypy/module/cppyy/test/advancedcpp2.cxx b/pypy/module/_cppyy/test/advancedcpp2.cxx rename from pypy/module/cppyy/test/advancedcpp2.cxx rename to pypy/module/_cppyy/test/advancedcpp2.cxx diff --git a/pypy/module/cppyy/test/advancedcpp2.h b/pypy/module/_cppyy/test/advancedcpp2.h rename from pypy/module/cppyy/test/advancedcpp2.h rename to pypy/module/_cppyy/test/advancedcpp2.h diff --git a/pypy/module/cppyy/test/advancedcpp2.xml 
b/pypy/module/_cppyy/test/advancedcpp2.xml rename from pypy/module/cppyy/test/advancedcpp2.xml rename to pypy/module/_cppyy/test/advancedcpp2.xml diff --git a/pypy/module/cppyy/test/advancedcpp2_LinkDef.h b/pypy/module/_cppyy/test/advancedcpp2_LinkDef.h rename from pypy/module/cppyy/test/advancedcpp2_LinkDef.h rename to pypy/module/_cppyy/test/advancedcpp2_LinkDef.h diff --git a/pypy/module/cppyy/test/advancedcpp_LinkDef.h b/pypy/module/_cppyy/test/advancedcpp_LinkDef.h rename from pypy/module/cppyy/test/advancedcpp_LinkDef.h rename to pypy/module/_cppyy/test/advancedcpp_LinkDef.h diff --git a/pypy/module/cppyy/test/bench1.cxx b/pypy/module/_cppyy/test/bench1.cxx rename from pypy/module/cppyy/test/bench1.cxx rename to pypy/module/_cppyy/test/bench1.cxx diff --git a/pypy/module/cppyy/test/bench1.py b/pypy/module/_cppyy/test/bench1.py rename from pypy/module/cppyy/test/bench1.py rename to pypy/module/_cppyy/test/bench1.py diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/_cppyy/test/conftest.py rename from pypy/module/cppyy/test/conftest.py rename to pypy/module/_cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/_cppyy/test/conftest.py @@ -3,7 +3,7 @@ @py.test.mark.tryfirst def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: - import pypy.module.cppyy.capi.loadable_capi as lcapi + import pypy.module._cppyy.capi.loadable_capi as lcapi if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex @@ -30,7 +30,7 @@ def pytest_configure(config): if py.path.local.sysfind('genreflex') is None: - import pypy.module.cppyy.capi.loadable_capi as lcapi + import pypy.module._cppyy.capi.loadable_capi as lcapi try: import ctypes ctypes.CDLL(lcapi.reflection_library) diff --git a/pypy/module/cppyy/test/crossing.cxx b/pypy/module/_cppyy/test/crossing.cxx rename from pypy/module/cppyy/test/crossing.cxx rename to 
pypy/module/_cppyy/test/crossing.cxx diff --git a/pypy/module/cppyy/test/crossing.h b/pypy/module/_cppyy/test/crossing.h rename from pypy/module/cppyy/test/crossing.h rename to pypy/module/_cppyy/test/crossing.h diff --git a/pypy/module/cppyy/test/crossing.xml b/pypy/module/_cppyy/test/crossing.xml rename from pypy/module/cppyy/test/crossing.xml rename to pypy/module/_cppyy/test/crossing.xml diff --git a/pypy/module/cppyy/test/crossing_LinkDef.h b/pypy/module/_cppyy/test/crossing_LinkDef.h rename from pypy/module/cppyy/test/crossing_LinkDef.h rename to pypy/module/_cppyy/test/crossing_LinkDef.h diff --git a/pypy/module/cppyy/test/datatypes.cxx b/pypy/module/_cppyy/test/datatypes.cxx rename from pypy/module/cppyy/test/datatypes.cxx rename to pypy/module/_cppyy/test/datatypes.cxx diff --git a/pypy/module/cppyy/test/datatypes.h b/pypy/module/_cppyy/test/datatypes.h rename from pypy/module/cppyy/test/datatypes.h rename to pypy/module/_cppyy/test/datatypes.h diff --git a/pypy/module/cppyy/test/datatypes.xml b/pypy/module/_cppyy/test/datatypes.xml rename from pypy/module/cppyy/test/datatypes.xml rename to pypy/module/_cppyy/test/datatypes.xml diff --git a/pypy/module/cppyy/test/datatypes_LinkDef.h b/pypy/module/_cppyy/test/datatypes_LinkDef.h rename from pypy/module/cppyy/test/datatypes_LinkDef.h rename to pypy/module/_cppyy/test/datatypes_LinkDef.h diff --git a/pypy/module/cppyy/test/example01.cxx b/pypy/module/_cppyy/test/example01.cxx rename from pypy/module/cppyy/test/example01.cxx rename to pypy/module/_cppyy/test/example01.cxx diff --git a/pypy/module/cppyy/test/example01.h b/pypy/module/_cppyy/test/example01.h rename from pypy/module/cppyy/test/example01.h rename to pypy/module/_cppyy/test/example01.h diff --git a/pypy/module/cppyy/test/example01.xml b/pypy/module/_cppyy/test/example01.xml rename from pypy/module/cppyy/test/example01.xml rename to pypy/module/_cppyy/test/example01.xml diff --git a/pypy/module/cppyy/test/example01_LinkDef.h 
b/pypy/module/_cppyy/test/example01_LinkDef.h rename from pypy/module/cppyy/test/example01_LinkDef.h rename to pypy/module/_cppyy/test/example01_LinkDef.h diff --git a/pypy/module/cppyy/test/fragile.cxx b/pypy/module/_cppyy/test/fragile.cxx rename from pypy/module/cppyy/test/fragile.cxx rename to pypy/module/_cppyy/test/fragile.cxx diff --git a/pypy/module/cppyy/test/fragile.h b/pypy/module/_cppyy/test/fragile.h rename from pypy/module/cppyy/test/fragile.h rename to pypy/module/_cppyy/test/fragile.h diff --git a/pypy/module/cppyy/test/fragile.xml b/pypy/module/_cppyy/test/fragile.xml rename from pypy/module/cppyy/test/fragile.xml rename to pypy/module/_cppyy/test/fragile.xml diff --git a/pypy/module/cppyy/test/fragile_LinkDef.h b/pypy/module/_cppyy/test/fragile_LinkDef.h rename from pypy/module/cppyy/test/fragile_LinkDef.h rename to pypy/module/_cppyy/test/fragile_LinkDef.h diff --git a/pypy/module/cppyy/test/iotypes.cxx b/pypy/module/_cppyy/test/iotypes.cxx rename from pypy/module/cppyy/test/iotypes.cxx rename to pypy/module/_cppyy/test/iotypes.cxx diff --git a/pypy/module/cppyy/test/iotypes.h b/pypy/module/_cppyy/test/iotypes.h rename from pypy/module/cppyy/test/iotypes.h rename to pypy/module/_cppyy/test/iotypes.h diff --git a/pypy/module/cppyy/test/iotypes.xml b/pypy/module/_cppyy/test/iotypes.xml rename from pypy/module/cppyy/test/iotypes.xml rename to pypy/module/_cppyy/test/iotypes.xml diff --git a/pypy/module/cppyy/test/iotypes_LinkDef.h b/pypy/module/_cppyy/test/iotypes_LinkDef.h rename from pypy/module/cppyy/test/iotypes_LinkDef.h rename to pypy/module/_cppyy/test/iotypes_LinkDef.h diff --git a/pypy/module/cppyy/test/operators.cxx b/pypy/module/_cppyy/test/operators.cxx rename from pypy/module/cppyy/test/operators.cxx rename to pypy/module/_cppyy/test/operators.cxx diff --git a/pypy/module/cppyy/test/operators.h b/pypy/module/_cppyy/test/operators.h rename from pypy/module/cppyy/test/operators.h rename to pypy/module/_cppyy/test/operators.h diff --git 
a/pypy/module/cppyy/test/operators.xml b/pypy/module/_cppyy/test/operators.xml rename from pypy/module/cppyy/test/operators.xml rename to pypy/module/_cppyy/test/operators.xml diff --git a/pypy/module/cppyy/test/operators_LinkDef.h b/pypy/module/_cppyy/test/operators_LinkDef.h rename from pypy/module/cppyy/test/operators_LinkDef.h rename to pypy/module/_cppyy/test/operators_LinkDef.h diff --git a/pypy/module/cppyy/test/overloads.cxx b/pypy/module/_cppyy/test/overloads.cxx rename from pypy/module/cppyy/test/overloads.cxx rename to pypy/module/_cppyy/test/overloads.cxx diff --git a/pypy/module/cppyy/test/overloads.h b/pypy/module/_cppyy/test/overloads.h rename from pypy/module/cppyy/test/overloads.h rename to pypy/module/_cppyy/test/overloads.h diff --git a/pypy/module/cppyy/test/overloads.xml b/pypy/module/_cppyy/test/overloads.xml rename from pypy/module/cppyy/test/overloads.xml rename to pypy/module/_cppyy/test/overloads.xml diff --git a/pypy/module/cppyy/test/overloads_LinkDef.h b/pypy/module/_cppyy/test/overloads_LinkDef.h rename from pypy/module/cppyy/test/overloads_LinkDef.h rename to pypy/module/_cppyy/test/overloads_LinkDef.h diff --git a/pypy/module/cppyy/test/simple_class.C b/pypy/module/_cppyy/test/simple_class.C rename from pypy/module/cppyy/test/simple_class.C rename to pypy/module/_cppyy/test/simple_class.C diff --git a/pypy/module/cppyy/test/std_streams.cxx b/pypy/module/_cppyy/test/std_streams.cxx rename from pypy/module/cppyy/test/std_streams.cxx rename to pypy/module/_cppyy/test/std_streams.cxx diff --git a/pypy/module/cppyy/test/std_streams.h b/pypy/module/_cppyy/test/std_streams.h rename from pypy/module/cppyy/test/std_streams.h rename to pypy/module/_cppyy/test/std_streams.h diff --git a/pypy/module/cppyy/test/std_streams.xml b/pypy/module/_cppyy/test/std_streams.xml rename from pypy/module/cppyy/test/std_streams.xml rename to pypy/module/_cppyy/test/std_streams.xml diff --git a/pypy/module/cppyy/test/std_streams_LinkDef.h 
b/pypy/module/_cppyy/test/std_streams_LinkDef.h rename from pypy/module/cppyy/test/std_streams_LinkDef.h rename to pypy/module/_cppyy/test/std_streams_LinkDef.h diff --git a/pypy/module/cppyy/test/stltypes.cxx b/pypy/module/_cppyy/test/stltypes.cxx rename from pypy/module/cppyy/test/stltypes.cxx rename to pypy/module/_cppyy/test/stltypes.cxx diff --git a/pypy/module/cppyy/test/stltypes.h b/pypy/module/_cppyy/test/stltypes.h rename from pypy/module/cppyy/test/stltypes.h rename to pypy/module/_cppyy/test/stltypes.h diff --git a/pypy/module/cppyy/test/stltypes.xml b/pypy/module/_cppyy/test/stltypes.xml rename from pypy/module/cppyy/test/stltypes.xml rename to pypy/module/_cppyy/test/stltypes.xml diff --git a/pypy/module/cppyy/test/stltypes_LinkDef.h b/pypy/module/_cppyy/test/stltypes_LinkDef.h rename from pypy/module/cppyy/test/stltypes_LinkDef.h rename to pypy/module/_cppyy/test/stltypes_LinkDef.h diff --git a/pypy/module/cppyy/test/support.py b/pypy/module/_cppyy/test/support.py rename from pypy/module/cppyy/test/support.py rename to pypy/module/_cppyy/test/support.py --- a/pypy/module/cppyy/test/support.py +++ b/pypy/module/_cppyy/test/support.py @@ -6,7 +6,7 @@ def setup_make(targetname): if sys.platform == 'win32': py.test.skip("win32 not supported so far") - import pypy.module.cppyy.capi.loadable_capi as lcapi + import pypy.module._cppyy.capi.loadable_capi as lcapi popen = subprocess.Popen(["make", targetname], cwd=str(currpath), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, _ = popen.communicate() diff --git a/pypy/module/cppyy/test/test_aclassloader.py b/pypy/module/_cppyy/test/test_aclassloader.py rename from pypy/module/cppyy/test/test_aclassloader.py rename to pypy/module/_cppyy/test/test_aclassloader.py --- a/pypy/module/cppyy/test/test_aclassloader.py +++ b/pypy/module/_cppyy/test/test_aclassloader.py @@ -12,17 +12,17 @@ class AppTestACLASSLOADER: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + spaceconfig = 
dict(usemodules=['_cppyy', '_rawffi', 'itertools']) def setup_class(cls): cls.space.appexec([], """(): - import cppyy""") + import _cppyy""") def test01_class_autoloading(self): """Test whether a class can be found through .rootmap.""" - import cppyy - example01_class = cppyy.gbl.example01 + import _cppyy + example01_class = _cppyy.gbl.example01 assert example01_class - cl2 = cppyy.gbl.example01 + cl2 = _cppyy.gbl.example01 assert cl2 assert example01_class is cl2 diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/_cppyy/test/test_advancedcpp.py rename from pypy/module/cppyy/test/test_advancedcpp.py rename to pypy/module/_cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/_cppyy/test/test_advancedcpp.py @@ -1,11 +1,9 @@ import py, os, sys -from pypy.module.cppyy import capi - - currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("advancedcppDict.so")) + def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") @@ -15,21 +13,20 @@ raise OSError("'make' failed (see stderr)") class AppTestADVANCEDCPP: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.newtext(test_dct) - cls.w_capi_identity = cls.space.newtext(capi.identify()) cls.w_advanced = cls.space.appexec([], """(): - import cppyy - return cppyy.load_reflection_info(%r)""" % (test_dct, )) + import _cppyy + return _cppyy.load_reflection_info(%r)""" % (test_dct, )) def test01_default_arguments(self): """Test usage of default arguments""" - import cppyy + import _cppyy def test_defaulter(n, t): - defaulter = getattr(cppyy.gbl, '%s_defaulter' % n) + defaulter = getattr(_cppyy.gbl, '%s_defaulter' % n) d = defaulter() assert d.m_a == t(11) @@ -68,9 +65,9 @@ def test02_simple_inheritance(self): """Test binding of a basic inheritance structure""" - import cppyy - 
base_class = cppyy.gbl.base_class - derived_class = cppyy.gbl.derived_class + import _cppyy + base_class = _cppyy.gbl.base_class + derived_class = _cppyy.gbl.derived_class assert issubclass(derived_class, base_class) assert not issubclass(base_class, derived_class) @@ -122,8 +119,8 @@ def test03_namespaces(self): """Test access to namespaces and inner classes""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl assert gbl.a_ns is gbl.a_ns assert gbl.a_ns.d_ns is gbl.a_ns.d_ns @@ -149,10 +146,10 @@ def test03a_namespace_lookup_on_update(self): """Test whether namespaces can be shared across dictionaries.""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl - lib2 = cppyy.load_reflection_info("advancedcpp2Dict.so") + lib2 = _cppyy.load_reflection_info("advancedcpp2Dict.so") assert gbl.a_ns is gbl.a_ns assert gbl.a_ns.d_ns is gbl.a_ns.d_ns @@ -178,8 +175,8 @@ def test04_template_types(self): """Test bindings of templated types""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl assert gbl.T1 is gbl.T1 assert gbl.T2 is gbl.T2 @@ -244,8 +241,8 @@ def test05_abstract_classes(self): """Test non-instatiatability of abstract classes""" - import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl raises(TypeError, gbl.a_class) raises(TypeError, gbl.some_abstract_class) @@ -259,12 +256,12 @@ def test06_datamembers(self): """Test data member access when using virtual inheritence""" - import cppyy - a_class = cppyy.gbl.a_class - b_class = cppyy.gbl.b_class - c_class_1 = cppyy.gbl.c_class_1 - c_class_2 = cppyy.gbl.c_class_2 - d_class = cppyy.gbl.d_class + import _cppyy + a_class = _cppyy.gbl.a_class + b_class = _cppyy.gbl.b_class + c_class_1 = _cppyy.gbl.c_class_1 + c_class_2 = _cppyy.gbl.c_class_2 + d_class = _cppyy.gbl.d_class assert issubclass(b_class, a_class) assert issubclass(c_class_1, a_class) @@ -353,8 +350,8 @@ def test07_pass_by_reference(self): """Test reference passing when using virtual inheritance""" - 
import cppyy - gbl = cppyy.gbl + import _cppyy + gbl = _cppyy.gbl b_class = gbl.b_class c_class = gbl.c_class_2 d_class = gbl.d_class @@ -386,71 +383,71 @@ def test08_void_pointer_passing(self): """Test passing of variants of void pointer arguments""" - import cppyy - pointer_pass = cppyy.gbl.pointer_pass - some_concrete_class = cppyy.gbl.some_concrete_class + import _cppyy + pointer_pass = _cppyy.gbl.pointer_pass + some_concrete_class = _cppyy.gbl.some_concrete_class pp = pointer_pass() o = some_concrete_class() - assert cppyy.addressof(o) == pp.gime_address_ptr(o) - assert cppyy.addressof(o) == pp.gime_address_ptr_ptr(o) - assert cppyy.addressof(o) == pp.gime_address_ptr_ref(o) + assert _cppyy.addressof(o) == pp.gime_address_ptr(o) + assert _cppyy.addressof(o) == pp.gime_address_ptr_ptr(o) + assert _cppyy.addressof(o) == pp.gime_address_ptr_ref(o) import array - addressofo = array.array('l', [cppyy.addressof(o)]) + addressofo = array.array('l', [_cppyy.addressof(o)]) assert addressofo.buffer_info()[0] == pp.gime_address_ptr_ptr(addressofo) assert 0 == pp.gime_address_ptr(0) assert 0 == pp.gime_address_ptr(None) - ptr = cppyy.bind_object(0, some_concrete_class) - assert cppyy.addressof(ptr) == 0 + ptr = _cppyy.bind_object(0, some_concrete_class) + assert _cppyy.addressof(ptr) == 0 pp.set_address_ptr_ref(ptr) - assert cppyy.addressof(ptr) == 0x1234 + assert _cppyy.addressof(ptr) == 0x1234 pp.set_address_ptr_ptr(ptr) - assert cppyy.addressof(ptr) == 0x4321 + assert _cppyy.addressof(ptr) == 0x4321 def test09_opaque_pointer_passing(self): """Test passing around of opaque pointers""" - import cppyy - some_concrete_class = cppyy.gbl.some_concrete_class + import _cppyy + some_concrete_class = _cppyy.gbl.some_concrete_class o = some_concrete_class() # TODO: figure out the PyPy equivalent of CObject (may have to do this # through the C-API from C++) From pypy.commits at gmail.com Thu Jul 20 03:18:21 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 20 Jul 2017 
00:18:21 -0700 (PDT) Subject: [pypy-commit] cffi default: Support some sub-combination involving C++ and Python 3.x Message-ID: <5970593d.c990df0a.8e12f.ecf1@mx.google.com> Author: Armin Rigo Branch: Changeset: r2993:bd1327fdae8a Date: 2017-07-20 09:07 +0200 http://bitbucket.org/cffi/cffi/changeset/bd1327fdae8a/ Log: Support some sub-combination involving C++ and Python 3.x diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -1,6 +1,11 @@ /***** Support code for embedding *****/ +#ifdef __cplusplus +extern "C" { +#endif + + #if defined(_MSC_VER) # define CFFI_DLLEXPORT __declspec(dllexport) #elif defined(__GNUC__) @@ -525,3 +530,7 @@ #undef cffi_compare_and_swap #undef cffi_write_barrier #undef cffi_read_barrier + +#ifdef __cplusplus +} +#endif diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -412,6 +412,9 @@ prnt(' }') prnt(' p[0] = (const void *)0x%x;' % self._version) prnt(' p[1] = &_cffi_type_context;') + prnt('#if PY_MAJOR_VERSION >= 3') + prnt(' return NULL;') + prnt('#endif') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in # 'export_symbols', so instead of fighting it, just give up and From pypy.commits at gmail.com Thu Jul 20 03:18:24 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 20 Jul 2017 00:18:24 -0700 (PDT) Subject: [pypy-commit] cffi default: Issue #323 Message-ID: <59705940.53121c0a.03d6.a82e@mx.google.com> Author: Armin Rigo Branch: Changeset: r2994:88913a107e11 Date: 2017-07-20 09:07 +0200 http://bitbucket.org/cffi/cffi/changeset/88913a107e11/ Log: Issue #323 CFFI_DLLEXPORT for MinGW on Windows diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -6,7 +6,7 @@ #endif -#if defined(_MSC_VER) +#if defined(_WIN32) # define CFFI_DLLEXPORT __declspec(dllexport) #elif defined(__GNUC__) # define CFFI_DLLEXPORT __attribute__((visibility("default"))) From pypy.commits at 
gmail.com Thu Jul 20 03:53:41 2017 From: pypy.commits at gmail.com (smihnea) Date: Thu, 20 Jul 2017 00:53:41 -0700 (PDT) Subject: [pypy-commit] pypy nopax: nopax added Message-ID: <59706185.84101c0a.73fd8.bb2c@mx.google.com> Author: Mihnea Saracin Branch: nopax Changeset: r91933:2b5d6124c784 Date: 2017-05-26 15:23 +0300 http://bitbucket.org/pypy/pypy/changeset/2b5d6124c784/ Log: nopax added diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py --- a/lib_pypy/_tkinter/tklib_build.py +++ b/lib_pypy/_tkinter/tklib_build.py @@ -22,12 +22,23 @@ linklibs = ['tcl', 'tk'] libdirs = [] else: + # On some Linux distributions, the tcl and tk libraries are + # stored in /usr/include, so we must check this case also + found = False for _ver in ['', '8.6', '8.5', '']: incdirs = ['/usr/include/tcl' + _ver] linklibs = ['tcl' + _ver, 'tk' + _ver] libdirs = [] if os.path.isdir(incdirs[0]): + found = True break + if not found: + for _ver in ['8.6', '8.5', '']: + incdirs = ['/usr/include'] + linklibs = ['tcl' + _ver, 'tk' + _ver] + libdirs=[] + if os.path.isfile(''.join(['/usr/lib/lib', linklibs[1], '.so'])): + break config_ffi = FFI() config_ffi.cdef(""" diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -264,6 +264,9 @@ raise Exception("Cannot use the --output option with PyPy " "when --shared is on (it is by default). " "See issue #1971.") + elif config.translation.nopax is not None: + raise Exception("Cannot use the --nopax option " + "when --shared is off (it is on by default). 
") # if both profopt and profoptpath are specified then we keep them as they are with no other changes if config.translation.profopt: diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -145,6 +145,9 @@ BoolOption("profopt", "Enable profile guided optimization. Defaults to enabling this for PyPy. For other training workloads, please specify them in profoptargs", cmdline="--profopt", default=False), StrOption("profoptargs", "Absolute path to the profile guided optimization training script + the necessary arguments of the script", cmdline="--profoptargs", default=None), + BoolOption("nopax", "Use this in case your system comes with a PAX protection. --nopax will disable it for pypy, so that it can use the jit. Requires paxmark to be installed", + default=False, + cmdline="--nopax"), BoolOption("instrument", "internal: turn instrumentation on", default=False, cmdline=None), BoolOption("countmallocs", "Count mallocs and frees", default=False, diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -333,6 +333,8 @@ extra_opts = [] if self.config.translation.profopt: extra_opts += ["profopt"] + if self.config.translation.nopax: + extra_opts += ["nopax"] if self.config.translation.make_jobs != 1: extra_opts += ['-j', str(self.config.translation.make_jobs)] if self.config.translation.lldebug: @@ -396,6 +398,15 @@ '$(MAKE) CFLAGS="-fprofile-use -fprofile-correction -fPIC $(CFLAGS) -fno-lto" LDFLAGS="-fprofile-use $(LDFLAGS) -fno-lto" $(PROFOPT_TARGET)', ])) + # No-pax code + if self.config.translation.nopax: + mk.definition('PAX_TARGET', '%s' % (exe_name)) + rules.append(('$(PAX_TARGET)', '$(TARGET) main.o', [ + '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. 
-l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)', + 'paxmark -zm $(PAX_TARGET)'])) + mk.rule('nopax', '', + '$(MAKE) CFLAGS="$(CFLAGS) $(CFLAGSEXTRA)" LDFLAGS="$(LDFLAGS)" $(PAX_TARGET)') + for rule in rules: mk.rule(*rule) From pypy.commits at gmail.com Thu Jul 20 03:53:43 2017 From: pypy.commits at gmail.com (smihnea) Date: Thu, 20 Jul 2017 00:53:43 -0700 (PDT) Subject: [pypy-commit] pypy nopax: nopax autodetection Message-ID: <59706187.d08f1c0a.ae401.8192@mx.google.com> Author: Mihnea Saracin Branch: nopax Changeset: r91934:af2f69738d56 Date: 2017-06-06 10:25 +0300 http://bitbucket.org/pypy/pypy/changeset/af2f69738d56/ Log: nopax autodetection diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py --- a/lib_pypy/_tkinter/tklib_build.py +++ b/lib_pypy/_tkinter/tklib_build.py @@ -32,13 +32,13 @@ if os.path.isdir(incdirs[0]): found = True break - if not found: - for _ver in ['8.6', '8.5', '']: - incdirs = ['/usr/include'] - linklibs = ['tcl' + _ver, 'tk' + _ver] - libdirs=[] - if os.path.isfile(''.join(['/usr/lib/lib', linklibs[1], '.so'])): - break + if not found: + for _ver in ['8.6', '8.5', '']: + incdirs = ['/usr/include'] + linklibs = ['tcl' + _ver, 'tk' + _ver] + libdirs=[] + if os.path.isfile(''.join(['/usr/lib/lib', linklibs[1], '.so'])): + break config_ffi = FFI() config_ffi.cdef(""" diff --git a/rpython/config/support.py b/rpython/config/support.py --- a/rpython/config/support.py +++ b/rpython/config/support.py @@ -35,3 +35,15 @@ return int(count) except (OSError, ValueError): return 1 + +def detect_pax(): + """ + Function to determine if your system comes with PAX protection. 
+ """ + if sys.platform.startswith('linux'): + # we need a running process PID and 1 is always running + with open("/proc/1/status") as fd: + data = fd.read() + if 'PaX' in data: + return True + return False diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -4,6 +4,7 @@ from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError from rpython.config.support import detect_number_of_processors +from rpython.config.support import detect_pax from rpython.translator.platform import platform as compiler @@ -146,7 +147,7 @@ cmdline="--profopt", default=False), StrOption("profoptargs", "Absolute path to the profile guided optimization training script + the necessary arguments of the script", cmdline="--profoptargs", default=None), BoolOption("nopax", "Use this in case your system comes with a PAX protection. --nopax will disable it for pypy, so that it can use the jit. 
Requires paxmark to be installed", - default=False, + default=detect_pax(), cmdline="--nopax"), BoolOption("instrument", "internal: turn instrumentation on", default=False, cmdline=None), From pypy.commits at gmail.com Thu Jul 20 03:53:45 2017 From: pypy.commits at gmail.com (smihnea) Date: Thu, 20 Jul 2017 00:53:45 -0700 (PDT) Subject: [pypy-commit] pypy nopax: replaced the paxmark call with a general implementation Message-ID: <59706189.15ae1c0a.5f334.c51d@mx.google.com> Author: Mihnea Saracin Branch: nopax Changeset: r91935:acf31eacc13b Date: 2017-06-07 14:19 +0300 http://bitbucket.org/pypy/pypy/changeset/acf31eacc13b/ Log: replaced the paxmark call with a general implementation diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -403,7 +403,7 @@ mk.definition('PAX_TARGET', '%s' % (exe_name)) rules.append(('$(PAX_TARGET)', '$(TARGET) main.o', [ '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)', - 'paxmark -zm $(PAX_TARGET)'])) + 'attr -q -s pax.flags -V m $(PAX_TARGET)'])) mk.rule('nopax', '', '$(MAKE) CFLAGS="$(CFLAGS) $(CFLAGSEXTRA)" LDFLAGS="$(LDFLAGS)" $(PAX_TARGET)') From pypy.commits at gmail.com Thu Jul 20 03:53:47 2017 From: pypy.commits at gmail.com (Alecsandru Patrascu) Date: Thu, 20 Jul 2017 00:53:47 -0700 (PDT) Subject: [pypy-commit] pypy nopax_update1: updates after review Message-ID: <5970618b.42d41c0a.35e5c.30bd@mx.google.com> Author: Alecsandru Patrascu Branch: nopax_update1 Changeset: r91936:41d9025e47f4 Date: 2017-06-20 14:37 +0300 http://bitbucket.org/pypy/pypy/changeset/41d9025e47f4/ Log: updates after review diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -4,7 +4,6 @@ from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import 
ConfigError from rpython.config.support import detect_number_of_processors -from rpython.config.support import detect_pax from rpython.translator.platform import platform as compiler @@ -146,9 +145,6 @@ BoolOption("profopt", "Enable profile guided optimization. Defaults to enabling this for PyPy. For other training workloads, please specify them in profoptargs", cmdline="--profopt", default=False), StrOption("profoptargs", "Absolute path to the profile guided optimization training script + the necessary arguments of the script", cmdline="--profoptargs", default=None), - BoolOption("nopax", "Use this in case your system comes with a PAX protection. --nopax will disable it for pypy, so that it can use the jit. Requires paxmark to be installed", - default=detect_pax(), - cmdline="--nopax"), BoolOption("instrument", "internal: turn instrumentation on", default=False, cmdline=None), BoolOption("countmallocs", "Count mallocs and frees", default=False, diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -14,6 +14,7 @@ from rpython.translator.gensupp import uniquemodulename, NameManager from rpython.translator.tool.cbuild import ExternalCompilationInfo + _CYGWIN = sys.platform == 'cygwin' _CPYTHON_RE = py.std.re.compile('^Python 2.[567]') @@ -333,8 +334,6 @@ extra_opts = [] if self.config.translation.profopt: extra_opts += ["profopt"] - if self.config.translation.nopax: - extra_opts += ["nopax"] if self.config.translation.make_jobs != 1: extra_opts += ['-j', str(self.config.translation.make_jobs)] if self.config.translation.lldebug: @@ -386,7 +385,7 @@ raise Exception("No profoptargs specified, neither in the command line, nor in the target. If the target is not PyPy, please specify profoptargs") if self.config.translation.shared: mk.rule('$(PROFOPT_TARGET)', '$(TARGET) main.o', - '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. 
-l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS) -lgcov') + ['$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS) -lgcov', '$(MAKE) postcompile BIN=$(PROFOPT_TARGET)']) else: mk.definition('PROFOPT_TARGET', '$(TARGET)') @@ -398,15 +397,6 @@ '$(MAKE) CFLAGS="-fprofile-use -fprofile-correction -fPIC $(CFLAGS) -fno-lto" LDFLAGS="-fprofile-use $(LDFLAGS) -fno-lto" $(PROFOPT_TARGET)', ])) - # No-pax code - if self.config.translation.nopax: - mk.definition('PAX_TARGET', '%s' % (exe_name)) - rules.append(('$(PAX_TARGET)', '$(TARGET) main.o', [ - '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)', - 'attr -q -s pax.flags -V m $(PAX_TARGET)'])) - mk.rule('nopax', '', - '$(MAKE) CFLAGS="$(CFLAGS) $(CFLAGSEXTRA)" LDFLAGS="$(LDFLAGS)" $(PAX_TARGET)') - for rule in rules: mk.rule(*rule) diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -3,6 +3,7 @@ import py, os, sys from rpython.translator.platform import Platform, log, _run_subprocess +from rpython.config.support import detect_pax import rpython rpydir = str(py.path.local(rpython.__file__).join('..')) @@ -196,9 +197,17 @@ for args in definitions: m.definition(*args) + # Post compile rule to be executed after a TARGET is ran + # + # Some processing might be necessary on the resulting binary, + # which is received in $(BIN) parameter + postcompile_rule = ('postcompile', '', ['true']) + if detect_pax(): + postcompile_rule[2].append('attr -q -s pax.flags -V m $(BIN)') + rules = [ ('all', '$(DEFAULT_TARGET)', []), - ('$(TARGET)', '$(OBJECTS)', '$(CC_LINK) $(LDFLAGSEXTRA) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS) $(LINKFILES) $(LDFLAGS)'), + ('$(TARGET)', '$(OBJECTS)', ['$(CC_LINK) $(LDFLAGSEXTRA) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS) $(LINKFILES) $(LDFLAGS)', '$(MAKE) postcompile BIN=$(TARGET)']), ('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< 
$(INCLUDEDIRS)'), ('%.o', '%.s', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), ('%.o', '%.cxx', '$(CXX) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), @@ -207,6 +216,8 @@ for rule in rules: m.rule(*rule) + m.rule(*postcompile_rule) + if shared: m.definition('SHARED_IMPORT_LIB', libname), m.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") @@ -216,7 +227,7 @@ 'int main(int argc, char* argv[]) ' '{ return $(PYPY_MAIN_FUNCTION)(argc, argv); }" > $@') m.rule('$(DEFAULT_TARGET)', ['$(TARGET)', 'main.o'], - '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)') + ['$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)', '$(MAKE) postcompile BIN=$(DEFAULT_TARGET)']) return m From pypy.commits at gmail.com Thu Jul 20 03:53:51 2017 From: pypy.commits at gmail.com (smihnea) Date: Thu, 20 Jul 2017 00:53:51 -0700 (PDT) Subject: [pypy-commit] pypy nopax: deleted --nopax option Message-ID: <5970618f.ce8c1c0a.ae218.cf12@mx.google.com> Author: Mihnea Saracin Branch: nopax Changeset: r91938:54a8ed9bd4b5 Date: 2017-07-04 11:52 +0300 http://bitbucket.org/pypy/pypy/changeset/54a8ed9bd4b5/ Log: deleted --nopax option diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -264,9 +264,6 @@ raise Exception("Cannot use the --output option with PyPy " "when --shared is on (it is by default). " "See issue #1971.") - elif config.translation.nopax is not None: - raise Exception("Cannot use the --nopax option " - "when --shared is off (it is on by default). 
") # if both profopt and profoptpath are specified then we keep them as they are with no other changes if config.translation.profopt: From pypy.commits at gmail.com Thu Jul 20 03:53:49 2017 From: pypy.commits at gmail.com (smihnea) Date: Thu, 20 Jul 2017 00:53:49 -0700 (PDT) Subject: [pypy-commit] pypy nopax: Merged in palecsandru/pypy_nopax_1/nopax_update1 (pull request #1) Message-ID: <5970618d.c6071c0a.a6338.cb4c@mx.google.com> Author: Mihnea Saracin Branch: nopax Changeset: r91937:b7a22c99849e Date: 2017-06-20 11:42 +0000 http://bitbucket.org/pypy/pypy/changeset/b7a22c99849e/ Log: Merged in palecsandru/pypy_nopax_1/nopax_update1 (pull request #1) updates after review Approved-by: Mihnea Saracin diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -4,7 +4,6 @@ from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError from rpython.config.support import detect_number_of_processors -from rpython.config.support import detect_pax from rpython.translator.platform import platform as compiler @@ -146,9 +145,6 @@ BoolOption("profopt", "Enable profile guided optimization. Defaults to enabling this for PyPy. For other training workloads, please specify them in profoptargs", cmdline="--profopt", default=False), StrOption("profoptargs", "Absolute path to the profile guided optimization training script + the necessary arguments of the script", cmdline="--profoptargs", default=None), - BoolOption("nopax", "Use this in case your system comes with a PAX protection. --nopax will disable it for pypy, so that it can use the jit. 
Requires paxmark to be installed", - default=detect_pax(), - cmdline="--nopax"), BoolOption("instrument", "internal: turn instrumentation on", default=False, cmdline=None), BoolOption("countmallocs", "Count mallocs and frees", default=False, diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -14,6 +14,7 @@ from rpython.translator.gensupp import uniquemodulename, NameManager from rpython.translator.tool.cbuild import ExternalCompilationInfo + _CYGWIN = sys.platform == 'cygwin' _CPYTHON_RE = py.std.re.compile('^Python 2.[567]') @@ -333,8 +334,6 @@ extra_opts = [] if self.config.translation.profopt: extra_opts += ["profopt"] - if self.config.translation.nopax: - extra_opts += ["nopax"] if self.config.translation.make_jobs != 1: extra_opts += ['-j', str(self.config.translation.make_jobs)] if self.config.translation.lldebug: @@ -386,7 +385,7 @@ raise Exception("No profoptargs specified, neither in the command line, nor in the target. If the target is not PyPy, please specify profoptargs") if self.config.translation.shared: mk.rule('$(PROFOPT_TARGET)', '$(TARGET) main.o', - '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS) -lgcov') + ['$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS) -lgcov', '$(MAKE) postcompile BIN=$(PROFOPT_TARGET)']) else: mk.definition('PROFOPT_TARGET', '$(TARGET)') @@ -398,15 +397,6 @@ '$(MAKE) CFLAGS="-fprofile-use -fprofile-correction -fPIC $(CFLAGS) -fno-lto" LDFLAGS="-fprofile-use $(LDFLAGS) -fno-lto" $(PROFOPT_TARGET)', ])) - # No-pax code - if self.config.translation.nopax: - mk.definition('PAX_TARGET', '%s' % (exe_name)) - rules.append(('$(PAX_TARGET)', '$(TARGET) main.o', [ - '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. 
-l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)', - 'attr -q -s pax.flags -V m $(PAX_TARGET)'])) - mk.rule('nopax', '', - '$(MAKE) CFLAGS="$(CFLAGS) $(CFLAGSEXTRA)" LDFLAGS="$(LDFLAGS)" $(PAX_TARGET)') - for rule in rules: mk.rule(*rule) diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -3,6 +3,7 @@ import py, os, sys from rpython.translator.platform import Platform, log, _run_subprocess +from rpython.config.support import detect_pax import rpython rpydir = str(py.path.local(rpython.__file__).join('..')) @@ -196,9 +197,17 @@ for args in definitions: m.definition(*args) + # Post compile rule to be executed after a TARGET is ran + # + # Some processing might be necessary on the resulting binary, + # which is received in $(BIN) parameter + postcompile_rule = ('postcompile', '', ['true']) + if detect_pax(): + postcompile_rule[2].append('attr -q -s pax.flags -V m $(BIN)') + rules = [ ('all', '$(DEFAULT_TARGET)', []), - ('$(TARGET)', '$(OBJECTS)', '$(CC_LINK) $(LDFLAGSEXTRA) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS) $(LINKFILES) $(LDFLAGS)'), + ('$(TARGET)', '$(OBJECTS)', ['$(CC_LINK) $(LDFLAGSEXTRA) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS) $(LINKFILES) $(LDFLAGS)', '$(MAKE) postcompile BIN=$(TARGET)']), ('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), ('%.o', '%.s', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), ('%.o', '%.cxx', '$(CXX) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), @@ -207,6 +216,8 @@ for rule in rules: m.rule(*rule) + m.rule(*postcompile_rule) + if shared: m.definition('SHARED_IMPORT_LIB', libname), m.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") @@ -216,7 +227,7 @@ 'int main(int argc, char* argv[]) ' '{ return $(PYPY_MAIN_FUNCTION)(argc, argv); }" > $@') m.rule('$(DEFAULT_TARGET)', ['$(TARGET)', 'main.o'], - '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. 
-l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)') + ['$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)', '$(MAKE) postcompile BIN=$(DEFAULT_TARGET)']) return m From pypy.commits at gmail.com Thu Jul 20 03:53:53 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 20 Jul 2017 00:53:53 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in smihnea/pypy_nopax/nopax (pull request #551) Message-ID: <59706191.0e1a1c0a.88e30.a8f9@mx.google.com> Author: Armin Rigo Branch: Changeset: r91939:8fcad79f230c Date: 2017-07-20 07:53 +0000 http://bitbucket.org/pypy/pypy/changeset/8fcad79f230c/ Log: Merged in smihnea/pypy_nopax/nopax (pull request #551) Adding nopax option for system that use PaX diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py --- a/lib_pypy/_tkinter/tklib_build.py +++ b/lib_pypy/_tkinter/tklib_build.py @@ -22,12 +22,23 @@ linklibs = ['tcl', 'tk'] libdirs = [] else: + # On some Linux distributions, the tcl and tk libraries are + # stored in /usr/include, so we must check this case also + found = False for _ver in ['', '8.6', '8.5', '']: incdirs = ['/usr/include/tcl' + _ver] linklibs = ['tcl' + _ver, 'tk' + _ver] libdirs = [] if os.path.isdir(incdirs[0]): + found = True break + if not found: + for _ver in ['8.6', '8.5', '']: + incdirs = ['/usr/include'] + linklibs = ['tcl' + _ver, 'tk' + _ver] + libdirs=[] + if os.path.isfile(''.join(['/usr/lib/lib', linklibs[1], '.so'])): + break config_ffi = FFI() config_ffi.cdef(""" diff --git a/rpython/config/support.py b/rpython/config/support.py --- a/rpython/config/support.py +++ b/rpython/config/support.py @@ -35,3 +35,15 @@ return int(count) except (OSError, ValueError): return 1 + +def detect_pax(): + """ + Function to determine if your system comes with PAX protection. 
+ """ + if sys.platform.startswith('linux'): + # we need a running process PID and 1 is always running + with open("/proc/1/status") as fd: + data = fd.read() + if 'PaX' in data: + return True + return False diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -14,6 +14,7 @@ from rpython.translator.gensupp import uniquemodulename, NameManager from rpython.translator.tool.cbuild import ExternalCompilationInfo + _CYGWIN = sys.platform == 'cygwin' _CPYTHON_RE = py.std.re.compile('^Python 2.[567]') @@ -458,7 +459,7 @@ if self.config.translation.shared: mk.rule('$(PROFOPT_TARGET)', '$(TARGET) main.o', - '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS) -lgcov') + ['$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS) -lgcov', '$(MAKE) postcompile BIN=$(PROFOPT_TARGET)']) else: mk.definition('PROFOPT_TARGET', '$(TARGET)') diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -3,6 +3,7 @@ import py, os, sys from rpython.translator.platform import Platform, log, _run_subprocess +from rpython.config.support import detect_pax import rpython rpydir = str(py.path.local(rpython.__file__).join('..')) @@ -196,9 +197,17 @@ for args in definitions: m.definition(*args) + # Post compile rule to be executed after a TARGET is ran + # + # Some processing might be necessary on the resulting binary, + # which is received in $(BIN) parameter + postcompile_rule = ('postcompile', '', ['true']) + if detect_pax(): + postcompile_rule[2].append('attr -q -s pax.flags -V m $(BIN)') + rules = [ ('all', '$(DEFAULT_TARGET)', []), - ('$(TARGET)', '$(OBJECTS)', '$(CC_LINK) $(LDFLAGSEXTRA) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS) $(LINKFILES) $(LDFLAGS)'), + ('$(TARGET)', '$(OBJECTS)', ['$(CC_LINK) $(LDFLAGSEXTRA) -o $@ $(OBJECTS) 
$(LIBDIRS) $(LIBS) $(LINKFILES) $(LDFLAGS)', '$(MAKE) postcompile BIN=$(TARGET)']), ('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), ('%.o', '%.s', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), ('%.o', '%.cxx', '$(CXX) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), @@ -207,6 +216,8 @@ for rule in rules: m.rule(*rule) + m.rule(*postcompile_rule) + if shared: m.definition('SHARED_IMPORT_LIB', libname), m.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") @@ -216,7 +227,7 @@ 'int main(int argc, char* argv[]) ' '{ return $(PYPY_MAIN_FUNCTION)(argc, argv); }" > $@') m.rule('$(DEFAULT_TARGET)', ['$(TARGET)', 'main.o'], - '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)') + ['$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)', '$(MAKE) postcompile BIN=$(DEFAULT_TARGET)']) return m From pypy.commits at gmail.com Thu Jul 20 04:01:23 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 20 Jul 2017 01:01:23 -0700 (PDT) Subject: [pypy-commit] pypy default: Minor tweaks, be more explicit about the fact that if we don't Message-ID: <59706353.8e8d1c0a.d2b77.9d81@mx.google.com> Author: Armin Rigo Branch: Changeset: r91940:3c1c88df6ea9 Date: 2017-07-20 10:00 +0200 http://bitbucket.org/pypy/pypy/changeset/3c1c88df6ea9/ Log: Minor tweaks, be more explicit about the fact that if we don't really find anything we'll fall back to ``linklibs=['tcl','tk']`` diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py --- a/lib_pypy/_tkinter/tklib_build.py +++ b/lib_pypy/_tkinter/tklib_build.py @@ -24,21 +24,25 @@ else: # On some Linux distributions, the tcl and tk libraries are # stored in /usr/include, so we must check this case also + libdirs = [] found = False - for _ver in ['', '8.6', '8.5', '']: + for _ver in ['', '8.6', '8.5']: incdirs = ['/usr/include/tcl' + _ver] linklibs = ['tcl' + _ver, 'tk' + _ver] - libdirs = [] if 
os.path.isdir(incdirs[0]): found = True break if not found: for _ver in ['8.6', '8.5', '']: - incdirs = ['/usr/include'] + incdirs = [] linklibs = ['tcl' + _ver, 'tk' + _ver] - libdirs=[] if os.path.isfile(''.join(['/usr/lib/lib', linklibs[1], '.so'])): + found = True break + if not found: + sys.stderr.write("*** TCL libraries not found! Falling back...\n") + incdirs = [] + linklibs = ['tcl', 'tk'] config_ffi = FFI() config_ffi.cdef(""" From pypy.commits at gmail.com Thu Jul 20 04:10:56 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 20 Jul 2017 01:10:56 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Issue #2605 Message-ID: <59706590.84101c0a.73fd8.bfd6@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91941:ca4d0c90f5a1 Date: 2017-07-20 10:09 +0200 http://bitbucket.org/pypy/pypy/changeset/ca4d0c90f5a1/ Log: Issue #2605 Support for libressl 2.5.4 diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py @@ -221,10 +221,16 @@ static const long X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM = 0; static const long X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED = 0; static const long X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 = 0; +#ifndef X509_V_ERR_HOSTNAME_MISMATCH static const long X509_V_ERR_HOSTNAME_MISMATCH = 0; +#endif +#ifndef X509_V_ERR_EMAIL_MISMATCH static const long X509_V_ERR_EMAIL_MISMATCH = 0; +#endif +#ifndef X509_V_ERR_IP_ADDRESS_MISMATCH static const long X509_V_ERR_IP_ADDRESS_MISMATCH = 0; #endif +#endif /* OpenSSL 1.0.2beta2+ verification parameters */ #if CRYPTOGRAPHY_OPENSSL_102BETA2_OR_GREATER && \ From pypy.commits at gmail.com Thu Jul 20 04:10:58 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 20 Jul 2017 01:10:58 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Document ca4d0c90f5a1 Message-ID: <59706592.da4a1c0a.e53a9.c171@mx.google.com> Author: Armin Rigo 
Branch: py3.5 Changeset: r91942:c8096e566a71 Date: 2017-07-20 10:10 +0200 http://bitbucket.org/pypy/pypy/changeset/c8096e566a71/ Log: Document ca4d0c90f5a1 diff --git a/lib_pypy/_cffi_ssl/README.md b/lib_pypy/_cffi_ssl/README.md --- a/lib_pypy/_cffi_ssl/README.md +++ b/lib_pypy/_cffi_ssl/README.md @@ -5,9 +5,15 @@ it renames the compiled shared object to _pypy_openssl.so (which means that cryptography can ship their own cffi backend) -NOTE: currently, we have changed ``_cffi_src/openssl/callbacks.py`` to -not rely on the CPython C API, and ``_cffi_src/utils.py`` for issue #2575 -(29c9a89359e4). (The first change is now backported.) +NOTE: currently, we have the following changes: + +* ``_cffi_src/openssl/callbacks.py`` to not rely on the CPython C API + (this change is now backported) + +* ``_cffi_src/utils.py`` for issue #2575 (29c9a89359e4) + +* ``_cffi_src/openssl/x509_vfy.py`` for issue #2605 (ca4d0c90f5a1) + # Tests? From pypy.commits at gmail.com Thu Jul 20 08:25:48 2017 From: pypy.commits at gmail.com (rlamy) Date: Thu, 20 Jul 2017 05:25:48 -0700 (PDT) Subject: [pypy-commit] pypy multiphase: hg merge py3.5 Message-ID: <5970a14c.c7bf1c0a.a7d74.ec94@mx.google.com> Author: Ronan Lamy Branch: multiphase Changeset: r91943:8cad295748ea Date: 2017-07-20 14:22 +0200 http://bitbucket.org/pypy/pypy/changeset/8cad295748ea/ Log: hg merge py3.5 diff too long, truncating to 2000 out of 10996 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -1,6 +1,6 @@ syntax: glob *.py[co] -*.sw[po] +*.sw[pon] *~ .*.swp .idea @@ -10,6 +10,8 @@ .venv .cache +.cache/ +.gdb_history syntax: regexp ^testresult$ ^site-packages$ @@ -90,7 +92,6 @@ .hypothesis/ ^release/ ^rpython/_cache$ -^\.cache$ pypy/module/cppyy/.+/*\.pcm diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -38,3 +38,5 @@ b16a4363e930f6401bceb499b9520955504c6cb0 release-pypy3.5-v5.7.0 1aa2d8e03cdfab54b7121e93fda7e98ea88a30bf release-pypy2.7-v5.7.1 2875f328eae2216a87f3d6f335092832eb031f56 
release-pypy3.5-v5.7.1 +c925e73810367cd960a32592dd7f728f436c125c release-pypy2.7-v5.8.0 +a37ecfe5f142bc971a86d17305cc5d1d70abec64 release-pypy3.5-v5.8.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -39,11 +39,11 @@ Armin Rigo Maciej Fijalkowski - Carl Friedrich Bolz + Carl Friedrich Bolz-Tereick Amaury Forgeot d'Arc Antonio Cuni + Matti Picus Samuele Pedroni - Matti Picus Ronan Lamy Alex Gaynor Philip Jenvey @@ -101,28 +101,28 @@ Vincent Legoll Michael Foord Stephan Diehl + Stefano Rivera Stefan Schwarzer Tomek Meka Valentino Volonghi - Stefano Rivera Patrick Maupin Devin Jeanpierre Bob Ippolito Bruno Gola David Malcolm Jean-Paul Calderone + Squeaky Edd Barrett - Squeaky Timo Paulssen Marius Gedminas Alexandre Fayolle Simon Burton Nicolas Truessel Martin Matusiak + Laurence Tratt Wenzhu Man Konstantin Lopuhin John Witulski - Laurence Tratt Greg Price Ivan Sichmann Freitas Dario Bertini @@ -149,13 +149,13 @@ Stian Andreassen Wanja Saatkamp Mike Blume + Joannah Nanjekye Gerald Klix Oscar Nierstrasz Rami Chowdhury Stefan H. 
Muller - Joannah Nanjekye + Tim Felgentreff Eugene Oden - Tim Felgentreff Jeff Terrace Henry Mason Vasily Kuznetsov @@ -164,11 +164,11 @@ Dusty Phillips Lukas Renggli Guenter Jantzen + Jasper Schulz Ned Batchelder Amit Regmi Anton Gulenko Sergey Matyunin - Jasper Schulz Andrew Chambers Nicolas Chauvat Andrew Durdin @@ -183,6 +183,7 @@ Gintautas Miliauskas Lucian Branescu Mihaila anatoly techtonik + Dodan Mihai Karl Bartel Gabriel Lavoie Jared Grubb @@ -220,12 +221,14 @@ Vaibhav Sood Reuben Cummings Attila Gobi + Alecsandru Patrascu Christopher Pope Tristan Arthur Christian Tismer Dan Stromberg Carl Meyer Florin Papa + Jens-Uwe Mager Valentina Mukhamedzhanova Stefano Parmesan touilleMan @@ -264,7 +267,6 @@ Dan Buch Lene Wagner Tomo Cocoa - Alecsandru Patrascu David Lievens Neil Blakey-Milner Henrik Vendelbo @@ -303,6 +305,7 @@ Anna Katrina Dominguez Kim Jin Su Amber Brown + Nate Bragg Ben Darnell Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -340,11 +343,13 @@ Jim Hunziker shoma hosaka Buck Golemon + Iraklis D. JohnDoe yrttyr Michael Chermside Anna Ravencroft remarkablerocket + Petre Vijiac Berker Peksag Christian Muirhead soareschen diff --git a/extra_tests/test_decimal.py b/extra_tests/test_decimal.py --- a/extra_tests/test_decimal.py +++ b/extra_tests/test_decimal.py @@ -1,3 +1,6 @@ +import pytest +from hypothesis import example, settings, given, strategies as st + import pickle import sys @@ -8,52 +11,112 @@ # import _decimal as C # import _pydecimal as P + at pytest.yield_fixture(params=[C, P], ids=['_decimal', '_pydecimal']) +def module(request): + yield request.param -class TestPythonAPI: +# Translate symbols. 
+CondMap = { + C.Clamped: P.Clamped, + C.ConversionSyntax: P.ConversionSyntax, + C.DivisionByZero: P.DivisionByZero, + C.DivisionImpossible: P.InvalidOperation, + C.DivisionUndefined: P.DivisionUndefined, + C.Inexact: P.Inexact, + C.InvalidContext: P.InvalidContext, + C.InvalidOperation: P.InvalidOperation, + C.Overflow: P.Overflow, + C.Rounded: P.Rounded, + C.Subnormal: P.Subnormal, + C.Underflow: P.Underflow, + C.FloatOperation: P.FloatOperation, +} - def check_equal(self, val, proto): - d = C.Decimal(val) - p = pickle.dumps(d, proto) - assert d == pickle.loads(p) +def check_same_flags(flags_C, flags_P): + for signal in flags_C: + assert flags_C[signal] == flags_P[CondMap[signal]] - def test_C(self): + +def test_C(): + sys.modules["decimal"] = C + import decimal + d = decimal.Decimal('1') + assert isinstance(d, C.Decimal) + assert isinstance(d, decimal.Decimal) + assert isinstance(d.as_tuple(), C.DecimalTuple) + + assert d == C.Decimal('1') + +def check_round_trip(val, proto): + d = C.Decimal(val) + p = pickle.dumps(d, proto) + assert d == pickle.loads(p) + +def test_pickle(): + v = '-3.123e81723' + for proto in range(pickle.HIGHEST_PROTOCOL + 1): sys.modules["decimal"] = C - import decimal - d = decimal.Decimal('1') - assert isinstance(d, C.Decimal) - assert isinstance(d, decimal.Decimal) - assert isinstance(d.as_tuple(), C.DecimalTuple) + check_round_trip('-3.141590000', proto) + check_round_trip(v, proto) - assert d == C.Decimal('1') + cd = C.Decimal(v) + pd = P.Decimal(v) + cdt = cd.as_tuple() + pdt = pd.as_tuple() + assert cdt.__module__ == pdt.__module__ - def test_pickle(self): - v = '-3.123e81723' - for proto in range(pickle.HIGHEST_PROTOCOL + 1): - sys.modules["decimal"] = C - self.check_equal('-3.141590000', proto) - self.check_equal(v, proto) + p = pickle.dumps(cdt, proto) + r = pickle.loads(p) + assert isinstance(r, C.DecimalTuple) + assert cdt == r - cd = C.Decimal(v) - pd = P.Decimal(v) - cdt = cd.as_tuple() - pdt = pd.as_tuple() - assert 
cdt.__module__ == pdt.__module__ + sys.modules["decimal"] = C + p = pickle.dumps(cd, proto) + sys.modules["decimal"] = P + r = pickle.loads(p) + assert isinstance(r, P.Decimal) + assert r == pd - p = pickle.dumps(cdt, proto) - r = pickle.loads(p) - assert isinstance(r, C.DecimalTuple) - assert cdt == r + sys.modules["decimal"] = C + p = pickle.dumps(cdt, proto) + sys.modules["decimal"] = P + r = pickle.loads(p) + assert isinstance(r, P.DecimalTuple) + assert r == pdt - sys.modules["decimal"] = C - p = pickle.dumps(cd, proto) - sys.modules["decimal"] = P - r = pickle.loads(p) - assert isinstance(r, P.Decimal) - assert r == pd +def test_compare_total(module): + assert module.Decimal('12').compare_total(module.Decimal('12.0')) == 1 + assert module.Decimal('4367').compare_total(module.Decimal('NaN')) == -1 - sys.modules["decimal"] = C - p = pickle.dumps(cdt, proto) - sys.modules["decimal"] = P - r = pickle.loads(p) - assert isinstance(r, P.DecimalTuple) - assert r == pdt +def test_compare_total_mag(module): + assert module.Decimal(1).compare_total_mag(-2) == -1 + +def convert_arg(module, arg): + if isinstance(arg, module.Decimal): + return arg + elif type(arg).__name__ == 'Decimal': + return module.Decimal(str(arg)) + else: + return arg + +from fractions import Fraction +from decimal import Decimal + + at given(st.decimals(), st.decimals() | st.fractions()) +def test_lt(d1, d2): + with C.localcontext(C.ExtendedContext) as ctx_C: + d1_C = convert_arg(C, d1) + d2_C = convert_arg(C, d2) + try: + res_C = d1_C < d2_C + except Exception as e: + res_C = str(type(e)) + with P.localcontext(P.ExtendedContext) as ctx_P: + d1_P = convert_arg(P, d1) + d2_P = convert_arg(P, d2) + try: + res_P = d1_P < d2_P + except Exception as e: + res_P = str(type(e)) + assert res_C == res_P + check_same_flags(ctx_C.flags, ctx_P.flags) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -580,6 
+580,7 @@ "getentropy() does not use a file descriptor") class URandomFDTests(unittest.TestCase): @unittest.skipUnless(resource, "test requires the resource module") + @test_support.impl_detail(pypy=False) # on Linux, may use getrandom() def test_urandom_failure(self): # Check urandom() failing when it is not able to open /dev/random. # We spawn a new process to make the test more robust (if getrlimit() diff --git a/lib-python/2.7/warnings.py b/lib-python/2.7/warnings.py --- a/lib-python/2.7/warnings.py +++ b/lib-python/2.7/warnings.py @@ -309,9 +309,12 @@ def __init__(self, message, category, filename, lineno, file=None, line=None): - local_values = locals() - for attr in self._WARNING_DETAILS: - setattr(self, attr, local_values[attr]) + self.message = message + self.category = category + self.filename = filename + self.lineno = lineno + self.file = file + self.line = line self._category_name = category.__name__ if category else None def __str__(self): diff --git a/lib-python/3/test/test_descr.py b/lib-python/3/test/test_descr.py --- a/lib-python/3/test/test_descr.py +++ b/lib-python/3/test/test_descr.py @@ -1663,7 +1663,8 @@ self.assertEqual(b.foo, 3) self.assertEqual(b.__class__, D) - @unittest.expectedFailure + #@unittest.expectedFailure --- on CPython. On PyPy, the test passes + @support.impl_detail(cpython=False) def test_bad_new(self): self.assertRaises(TypeError, object.__new__) self.assertRaises(TypeError, object.__new__, '') diff --git a/lib_pypy/_cffi_ssl/README.md b/lib_pypy/_cffi_ssl/README.md --- a/lib_pypy/_cffi_ssl/README.md +++ b/lib_pypy/_cffi_ssl/README.md @@ -5,8 +5,15 @@ it renames the compiled shared object to _pypy_openssl.so (which means that cryptography can ship their own cffi backend) -NOTE: currently, we have changed ``_cffi_src/openssl/callbacks.py`` to -not rely on the CPython C API. 
+NOTE: currently, we have the following changes: + +* ``_cffi_src/openssl/callbacks.py`` to not rely on the CPython C API + (this change is now backported) + +* ``_cffi_src/utils.py`` for issue #2575 (29c9a89359e4) + +* ``_cffi_src/openssl/x509_vfy.py`` for issue #2605 (ca4d0c90f5a1) + # Tests? diff --git a/lib_pypy/_cffi_ssl/_cffi_src/.build_openssl.py.swn b/lib_pypy/_cffi_ssl/_cffi_src/.build_openssl.py.swn deleted file mode 100644 index 180c02ff82d3363f34a334aae22c9876d4c96481..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py @@ -221,10 +221,16 @@ static const long X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM = 0; static const long X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED = 0; static const long X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 = 0; +#ifndef X509_V_ERR_HOSTNAME_MISMATCH static const long X509_V_ERR_HOSTNAME_MISMATCH = 0; +#endif +#ifndef X509_V_ERR_EMAIL_MISMATCH static const long X509_V_ERR_EMAIL_MISMATCH = 0; +#endif +#ifndef X509_V_ERR_IP_ADDRESS_MISMATCH static const long X509_V_ERR_IP_ADDRESS_MISMATCH = 0; #endif +#endif /* OpenSSL 1.0.2beta2+ verification parameters */ #if CRYPTOGRAPHY_OPENSSL_102BETA2_OR_GREATER && \ diff --git a/lib_pypy/_cffi_ssl/_cffi_src/utils.py b/lib_pypy/_cffi_ssl/_cffi_src/utils.py --- a/lib_pypy/_cffi_ssl/_cffi_src/utils.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/utils.py @@ -47,9 +47,19 @@ # is legal, but the following will fail to compile: # int foo(int); # int foo(short); + # + # XXX No, it is a bad idea. OpenSSL itself tends to tweak + # the definitions, like adding a 'const' (see issue #2575). Every + # time they do so, it makes a gratuitous break in this code. It is + # better to rely on the C compiler for that, which is a little bit + # more flexible. That's the point of set_source(). 
We can still + # re-enable the line ``#functions +`` below to get the original + # behavior. (I would enable it during tests, but I don't find any + # custom test at all..??) + # verify_source = "\n".join( includes + - functions + + #functions + customizations ) ffi = build_ffi( diff --git a/lib_pypy/_cffi_ssl/_stdssl/.__init__.py.swn b/lib_pypy/_cffi_ssl/_stdssl/.__init__.py.swn deleted file mode 100644 index 40344f6cee5cb001b73dd3a9a203015568831391..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib_pypy/_cffi_ssl/_stdssl/error.py b/lib_pypy/_cffi_ssl/_stdssl/error.py --- a/lib_pypy/_cffi_ssl/_stdssl/error.py +++ b/lib_pypy/_cffi_ssl/_stdssl/error.py @@ -1,4 +1,5 @@ import sys +import os import traceback from _pypy_openssl import ffi from _pypy_openssl import lib @@ -100,18 +101,17 @@ errval = SSL_ERROR_WANT_CONNECT elif err == SSL_ERROR_SYSCALL: if e == 0: - if ret == 0 or obj.socket is not None: + if ret == 0 or obj.socket is None: errtype = SSLEOFError errstr = "EOF occurred in violation of protocol" errval = SSL_ERROR_EOF elif ret == -1 and obj.socket is not None: # the underlying BIO reported an I/0 error lib.ERR_clear_error() - s = obj.get_socket_or_None() - s.errorhandler() - assert 0, "must not get here" - #errno = ffi.errno - #return IOError(errno) + # s = obj.get_socket_or_None() + # XXX: Windows? + errno = ffi.errno + return OSError(errno, os.strerror(errno)) else: errtype = SSLSyscallError errstr = "Some I/O error occurred" diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -74,12 +74,16 @@ return self._type_._alignmentofinstances() def _CData_output(self, resarray, base=None, index=-1): - # this seems to be a string if we're array of char, surprise! 
- from ctypes import c_char, c_wchar - if self._type_ is c_char: - return _rawffi.charp2string(resarray.buffer, self._length_) - if self._type_ is c_wchar: - return _rawffi.wcharp2unicode(resarray.buffer, self._length_) + from _rawffi.alt import types + # If a char_p or unichar_p is received, skip the string interpretation + if base._ffiargtype != types.Pointer(types.char_p) and \ + base._ffiargtype != types.Pointer(types.unichar_p): + # this seems to be a string if we're array of char, surprise! + from ctypes import c_char, c_wchar + if self._type_ is c_char: + return _rawffi.charp2string(resarray.buffer, self._length_) + if self._type_ is c_wchar: + return _rawffi.wcharp2unicode(resarray.buffer, self._length_) res = self.__new__(self) ffiarray = self._ffiarray.fromaddress(resarray.buffer, self._length_) res._buffer = ffiarray diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -8,6 +8,9 @@ from _curses_cffi import ffi, lib +version = b"2.2" +__version__ = b"2.2" + def _copy_to_globals(name): globals()[name] = getattr(lib, name) @@ -60,10 +63,6 @@ _setup() -# Do we want this? -# version = "2.2" -# __version__ = "2.2" - # ____________________________________________________________ @@ -404,6 +403,17 @@ raise error("getch requires 0 or 2 arguments") return val + def get_wch(self, *args): + wch = ffi.new("int[1]") + if len(args) == 0: + val = lib.wget_wch(self._win, wch) + elif len(args) == 2: + val = lib.mvwget_wch(self._win, *args, wch) + else: + raise error("get_wch requires 0 or 2 arguments") + _check_ERR(val, "get_wch"): + return wch[0] + def getkey(self, *args): if len(args) == 0: val = lib.wgetch(self._win) @@ -919,101 +929,29 @@ return None -# XXX: Do something about the following? 
-# /* Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES -# * and _curses.COLS */ -# #if defined(HAVE_CURSES_RESIZETERM) || defined(HAVE_CURSES_RESIZE_TERM) -# static int -# update_lines_cols(void) -# { -# PyObject *o; -# PyObject *m = PyImport_ImportModuleNoBlock("curses"); +# Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES +# and _curses.COLS +def update_lines_cols(): + globals()["LINES"] = lib.LINES + globals()["COLS"] = lib.COLS + try: + m = sys.modules["curses"] + m.LINES = lib.LINES + m.COLS = lib.COLS + except (KeyError, AttributeError): + pass -# if (!m) -# return 0; -# o = PyInt_FromLong(LINES); -# if (!o) { -# Py_DECREF(m); -# return 0; -# } -# if (PyObject_SetAttrString(m, "LINES", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# if (PyDict_SetItemString(ModDict, "LINES", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# Py_DECREF(o); -# o = PyInt_FromLong(COLS); -# if (!o) { -# Py_DECREF(m); -# return 0; -# } -# if (PyObject_SetAttrString(m, "COLS", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# if (PyDict_SetItemString(ModDict, "COLS", o)) { -# Py_DECREF(m); -# Py_DECREF(o); -# return 0; -# } -# Py_DECREF(o); -# Py_DECREF(m); -# return 1; -# } -# #endif +def resizeterm(lines, columns): + _ensure_initialised() + _check_ERR(lib.resizeterm(lines, columns), "resizeterm") + update_lines_cols() -# #ifdef HAVE_CURSES_RESIZETERM -# static PyObject * -# PyCurses_ResizeTerm(PyObject *self, PyObject *args) -# { -# int lines; -# int columns; -# PyObject *result; -# PyCursesInitialised; - -# if (!PyArg_ParseTuple(args,"ii:resizeterm", &lines, &columns)) -# return NULL; - -# result = PyCursesCheckERR(resizeterm(lines, columns), "resizeterm"); -# if (!result) -# return NULL; -# if (!update_lines_cols()) -# return NULL; -# return result; -# } - -# #endif - -# #ifdef HAVE_CURSES_RESIZE_TERM -# static PyObject * -# PyCurses_Resize_Term(PyObject *self, PyObject *args) -# { -# int 
lines; -# int columns; - -# PyObject *result; - -# PyCursesInitialised; - -# if (!PyArg_ParseTuple(args,"ii:resize_term", &lines, &columns)) -# return NULL; - -# result = PyCursesCheckERR(resize_term(lines, columns), "resize_term"); -# if (!result) -# return NULL; -# if (!update_lines_cols()) -# return NULL; -# return result; -# } -# #endif /* HAVE_CURSES_RESIZE_TERM */ +def resize_term(lines, columns): + _ensure_initialised() + _check_ERR(lib.resize_term(lines, columns), "resize_term") + update_lines_cols() def setsyx(y, x): @@ -1078,6 +1016,11 @@ return _check_ERR(lib.ungetch(_chtype(ch)), "ungetch") +def unget_wch(ch): + _ensure_initialised() + return _check_ERR(lib.unget_wch(_chtype(ch)), "unget_wch") + + def use_env(flag): lib.use_env(flag) return None diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py --- a/lib_pypy/_curses_build.py +++ b/lib_pypy/_curses_build.py @@ -1,3 +1,4 @@ +import os from cffi import FFI, VerificationError @@ -17,6 +18,11 @@ # error message raise e_last +def find_curses_include_dirs(): + if os.path.exists('/usr/include/ncursesw'): + return ['/usr/include/ncursesw'] + return [] + ffi = FFI() @@ -59,7 +65,8 @@ void _m_getsyx(int *yx) { getsyx(yx[0], yx[1]); } -""", libraries=[find_curses_library(), 'panel']) +""", libraries=[find_curses_library(), 'panel'], + include_dirs=find_curses_include_dirs()) ffi.cdef(""" @@ -70,6 +77,8 @@ typedef unsigned long... chtype; typedef chtype attr_t; +typedef int... 
wint_t; + typedef struct { short id; /* ID to distinguish multiple devices */ @@ -105,6 +114,13 @@ static const chtype A_CHARTEXT; static const chtype A_COLOR; +static const chtype A_HORIZONTAL; +static const chtype A_LEFT; +static const chtype A_LOW; +static const chtype A_RIGHT; +static const chtype A_TOP; +static const chtype A_VERTICAL; + static const int BUTTON1_RELEASED; static const int BUTTON1_PRESSED; static const int BUTTON1_CLICKED; @@ -160,6 +176,8 @@ void filter(void); int flash(void); int flushinp(void); +int wget_wch(WINDOW *, wint_t *); +int mvwget_wch(WINDOW *, int, int, wint_t *); chtype getbkgd(WINDOW *); WINDOW * getwin(FILE *); int halfdelay(int); @@ -220,6 +238,8 @@ int resetty(void); int reset_prog_mode(void); int reset_shell_mode(void); +int resizeterm(int, int); +int resize_term(int, int); int savetty(void); int scroll(WINDOW *); int scrollok(WINDOW *, bool); @@ -233,6 +253,7 @@ int touchwin(WINDOW *); int typeahead(int); int ungetch(int); +int unget_wch(const wchar_t); int untouchwin(WINDOW *); void use_env(bool); int waddch(WINDOW *, const chtype); diff --git a/lib_pypy/_decimal.py b/lib_pypy/_decimal.py --- a/lib_pypy/_decimal.py +++ b/lib_pypy/_decimal.py @@ -489,13 +489,16 @@ vv.exp = 0 multiplied = Decimal._new_empty() denom = Decimal(other.denominator) - with _CatchStatus(context) as (ctx, status_ptr): - _mpdec.mpd_qmul(multiplied._mpd, vv, denom._mpd, - ctx, status_ptr) - multiplied._mpd.exp += exp # XXX probably a bug - # in _decimal.c + maxctx = _ffi.new("struct mpd_context_t*") + _mpdec.mpd_maxcontext(maxctx) + status_ptr = _ffi.new("uint32_t*") + _mpdec.mpd_qmul(multiplied._mpd, vv, denom._mpd, + maxctx, status_ptr) + multiplied._mpd.exp = exp finally: _mpdec.mpd_del(vv) + if status_ptr[0] != 0: + raise ValueError("exact conversion for comparison failed") return multiplied, numerator else: @@ -719,8 +722,8 @@ compare = _make_binary_operation('compare') compare_signal = _make_binary_operation('compare_signal') - compare_total = 
_make_binary_operation('compare') - compare_total_mag = _make_binary_operation('compare') + compare_total = _make_binary_operation('compare_total') + compare_total_mag = _make_binary_operation('compare_total_mag') logical_and = _make_binary_operation('logical_and') logical_or = _make_binary_operation('logical_or') logical_xor = _make_binary_operation('logical_xor') diff --git a/lib_pypy/_lzma.py b/lib_pypy/_lzma.py --- a/lib_pypy/_lzma.py +++ b/lib_pypy/_lzma.py @@ -10,6 +10,7 @@ import weakref import sys import io +import __pypy__ from _lzma_cffi import ffi, lib as m @@ -63,6 +64,10 @@ m._pylzma_stream_init(ret) return ffi.gc(ret, m.lzma_end) +def _release_lzma_stream(st): + ffi.gc(st, None) + m.lzma_end(st) + def add_constant(c): globals()[c] = getattr(m, 'LZMA_' + c) @@ -148,39 +153,39 @@ def parse_filter_spec_lzma(id, preset=m.LZMA_PRESET_DEFAULT, **kwargs): ret = ffi.new('lzma_options_lzma*') if m.lzma_lzma_preset(ret, preset): - raise LZMAError("Invalid...") + raise LZMAError("Invalid compression preset: %s" % preset) for arg, val in kwargs.items(): if arg in ('dict_size', 'lc', 'lp', 'pb', 'nice_len', 'depth'): setattr(ret, arg, val) elif arg in ('mf', 'mode'): setattr(ret, arg, int(val)) else: - raise ValueError("Invalid...") + raise ValueError("Invalid filter specifier for LZMA filter") return ret def parse_filter_spec(spec): if not isinstance(spec, collections.Mapping): - raise TypeError("Filter...") + raise TypeError("Filter specifier must be a dict or dict-like object") ret = ffi.new('lzma_filter*') try: ret.id = spec['id'] except KeyError: - raise ValueError("Filter...") + raise ValueError("Filter specifier must have an \"id\" entry") if ret.id in (m.LZMA_FILTER_LZMA1, m.LZMA_FILTER_LZMA2): try: options = parse_filter_spec_lzma(**spec) except TypeError: - raise ValueError("Invalid...") + raise ValueError("Invalid filter specifier for LZMA filter") elif ret.id == m.LZMA_FILTER_DELTA: try: options = parse_filter_spec_delta(**spec) except TypeError: - 
raise ValueError("Invalid...") + raise ValueError("Invalid filter specifier for delta filter") elif ret.id in BCJ_FILTERS: try: options = parse_filter_spec_bcj(**spec) except TypeError: - raise ValueError("Invalid...") + raise ValueError("Invalid filter specifier for BCJ filter") else: raise ValueError("Invalid %d" % (ret.id,)) @@ -204,7 +209,9 @@ def parse_filter_chain_spec(filterspecs): if len(filterspecs) > m.LZMA_FILTERS_MAX: - raise ValueError("Too...") + raise ValueError( + "Too many filters - liblzma supports a maximum of %s" % + m.LZMA_FILTERS_MAX) filters = ffi.new('lzma_filter[]', m.LZMA_FILTERS_MAX+1) _owns[filters] = children = [] for i in range(m.LZMA_FILTERS_MAX+1): @@ -236,7 +243,7 @@ elif filter.id in BCJ_FILTERS: add_opts('lzma_options_bcj', 'start_offset') else: - raise ValueError("Invalid...") + raise ValueError("Invalid filter ID: %s" % filter.id) return spec def _decode_filter_properties(filter_id, encoded_props): @@ -420,25 +427,26 @@ For one-shot decompression, use the decompress() function instead. 
""" - def __init__(self, format=FORMAT_AUTO, memlimit=None, filters=None, header=None, check=None, unpadded_size=None): + def __init__(self, format=FORMAT_AUTO, memlimit=None, filters=None, + header=None, check=None, unpadded_size=None): decoder_flags = m.LZMA_TELL_ANY_CHECK | m.LZMA_TELL_NO_CHECK - #decoder_flags = 0 if memlimit is not None: if format == FORMAT_RAW: - raise ValueError("Cannot sp...") - #memlimit = long(memlimit) + raise ValueError("Cannot specify memory limit with FORMAT_RAW") else: memlimit = m.UINT64_MAX if format == FORMAT_RAW and filters is None: - raise ValueError("Must...") + raise ValueError("Must specify filters for FORMAT_RAW") elif format != FORMAT_RAW and filters is not None: - raise ValueError("Cannot...") + raise ValueError("Cannot specify filters except with FORMAT_RAW") if format == FORMAT_BLOCK and (header is None or unpadded_size is None or check is None): - raise ValueError("Must...") + raise ValueError("Must specify header, unpadded_size and check " + "with FORMAT_BLOCK") elif format != FORMAT_BLOCK and (header is not None or unpadded_size is not None or check is not None): - raise ValueError("Cannot...") + raise ValueError("Cannot specify header, unpadded_size or check " + "except with FORMAT_BLOCK") format = _parse_format(format) self.lock = threading.Lock() @@ -476,7 +484,7 @@ self.expected_size = block.compressed_size catch_lzma_error(m.lzma_block_decoder, self.lzs, block) else: - raise ValueError("invalid...") + raise ValueError("invalid container format: %s" % format) def pre_decompress_left_data(self, buf, buf_size): # in this case there is data left that needs to be processed before the first @@ -551,7 +559,7 @@ raise TypeError("max_length parameter object cannot be interpreted as an integer") with self.lock: if self.eof: - raise EOFError("Already...") + raise EOFError("Already at end of stream") lzs = self.lzs data = to_bytes(data) buf = ffi.new('uint8_t[]', data) @@ -648,6 +656,16 @@ raise TypeError("cannot serialize 
'%s' object" % self.__class__.__name__) + +# Issue #2579: Setting up the stream for encoding takes around 17MB of +# RAM on my Linux 64 system. So we call add_memory_pressure(17MB) when +# we create the stream. In flush(), we actively free the stream even +# though we could just leave it to the GC (but 17MB is too much for +# doing that sanely); at this point we call add_memory_pressure(-17MB) +# to cancel the original increase. +COMPRESSION_STREAM_SIZE = 1024*1024*17 + + class LZMACompressor(object): """ LZMACompressor(format=FORMAT_XZ, check=-1, preset=None, filters=None) @@ -679,15 +697,16 @@ """ def __init__(self, format=FORMAT_XZ, check=-1, preset=None, filters=None): if format != FORMAT_XZ and check not in (-1, m.LZMA_CHECK_NONE): - raise ValueError("Integrity...") + raise ValueError("Integrity checks are only supported by FORMAT_XZ") if preset is not None and filters is not None: - raise ValueError("Cannot...") + raise ValueError("Cannot specify both preset and filter chain") if preset is None: preset = m.LZMA_PRESET_DEFAULT format = _parse_format(format) self.lock = threading.Lock() self.flushed = 0 self.lzs = _new_lzma_stream() + __pypy__.add_memory_pressure(COMPRESSION_STREAM_SIZE) if format == FORMAT_XZ: if filters is None: if check == -1: @@ -702,19 +721,19 @@ if filters is None: options = ffi.new('lzma_options_lzma*') if m.lzma_lzma_preset(options, preset): - raise LZMAError("Invalid...") + raise LZMAError("Invalid compression preset: %s" % preset) catch_lzma_error(m.lzma_alone_encoder, self.lzs, options) else: raise NotImplementedError elif format == FORMAT_RAW: if filters is None: - raise ValueError("Must...") + raise ValueError("Must specify filters for FORMAT_RAW") filters = parse_filter_chain_spec(filters) catch_lzma_error(m.lzma_raw_encoder, self.lzs, filters) else: - raise ValueError("Invalid...") + raise ValueError("invalid container format: %s" % format) def compress(self, data): """ @@ -728,7 +747,7 @@ """ with self.lock: if self.flushed: - 
raise ValueError("Compressor...") + raise ValueError("Compressor has been flushed") return self._compress(data) def _compress(self, data, action=m.LZMA_RUN): @@ -769,9 +788,12 @@ def flush(self): with self.lock: if self.flushed: - raise ValueError("Repeated...") + raise ValueError("Repeated call to flush()") self.flushed = 1 - return self._compress(b'', action=m.LZMA_FINISH) + result = self._compress(b'', action=m.LZMA_FINISH) + __pypy__.add_memory_pressure(-COMPRESSION_STREAM_SIZE) + _release_lzma_stream(self.lzs) + return result def __getstate__(self): raise TypeError("cannot serialize '%s' object" % diff --git a/lib_pypy/cffi/_cffi_errors.h b/lib_pypy/cffi/_cffi_errors.h new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/_cffi_errors.h @@ -0,0 +1,145 @@ +#ifndef CFFI_MESSAGEBOX +# ifdef _MSC_VER +# define CFFI_MESSAGEBOX 1 +# else +# define CFFI_MESSAGEBOX 0 +# endif +#endif + + +#if CFFI_MESSAGEBOX +/* Windows only: logic to take the Python-CFFI embedding logic + initialization errors and display them in a background thread + with MessageBox. The idea is that if the whole program closes + as a result of this problem, then likely it is already a console + program and you can read the stderr output in the console too. + If it is not a console program, then it will likely show its own + dialog to complain, or generally not abruptly close, and for this + case the background thread should stay alive. 
+*/ +static void *volatile _cffi_bootstrap_text; + +static PyObject *_cffi_start_error_capture(void) +{ + PyObject *result = NULL; + PyObject *x, *m, *bi; + + if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text, + (void *)1, NULL) != NULL) + return (PyObject *)1; + + m = PyImport_AddModule("_cffi_error_capture"); + if (m == NULL) + goto error; + + result = PyModule_GetDict(m); + if (result == NULL) + goto error; + +#if PY_MAJOR_VERSION >= 3 + bi = PyImport_ImportModule("builtins"); +#else + bi = PyImport_ImportModule("__builtin__"); +#endif + if (bi == NULL) + goto error; + PyDict_SetItemString(result, "__builtins__", bi); + Py_DECREF(bi); + + x = PyRun_String( + "import sys\n" + "class FileLike:\n" + " def write(self, x):\n" + " of.write(x)\n" + " self.buf += x\n" + "fl = FileLike()\n" + "fl.buf = ''\n" + "of = sys.stderr\n" + "sys.stderr = fl\n" + "def done():\n" + " sys.stderr = of\n" + " return fl.buf\n", /* make sure the returned value stays alive */ + Py_file_input, + result, result); + Py_XDECREF(x); + + error: + if (PyErr_Occurred()) + { + PyErr_WriteUnraisable(Py_None); + PyErr_Clear(); + } + return result; +} + +#pragma comment(lib, "user32.lib") + +static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored) +{ + Sleep(666); /* may be interrupted if the whole process is closing */ +#if PY_MAJOR_VERSION >= 3 + MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text, + L"Python-CFFI error", + MB_OK | MB_ICONERROR); +#else + MessageBoxA(NULL, (char *)_cffi_bootstrap_text, + "Python-CFFI error", + MB_OK | MB_ICONERROR); +#endif + _cffi_bootstrap_text = NULL; + return 0; +} + +static void _cffi_stop_error_capture(PyObject *ecap) +{ + PyObject *s; + void *text; + + if (ecap == (PyObject *)1) + return; + + if (ecap == NULL) + goto error; + + s = PyRun_String("done()", Py_eval_input, ecap, ecap); + if (s == NULL) + goto error; + + /* Show a dialog box, but in a background thread, and + never show multiple dialog boxes at once. 
*/ +#if PY_MAJOR_VERSION >= 3 + text = PyUnicode_AsWideCharString(s, NULL); +#else + text = PyString_AsString(s); +#endif + + _cffi_bootstrap_text = text; + + if (text != NULL) + { + HANDLE h; + h = CreateThread(NULL, 0, _cffi_bootstrap_dialog, + NULL, 0, NULL); + if (h != NULL) + CloseHandle(h); + } + /* decref the string, but it should stay alive as 'fl.buf' + in the small module above. It will really be freed only if + we later get another similar error. So it's a leak of at + most one copy of the small module. That's fine for this + situation which is usually a "fatal error" anyway. */ + Py_DECREF(s); + PyErr_Clear(); + return; + + error: + _cffi_bootstrap_text = NULL; + PyErr_Clear(); +} + +#else + +static PyObject *_cffi_start_error_capture(void) { return NULL; } +static void _cffi_stop_error_capture(PyObject *ecap) { } + +#endif diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -159,9 +159,9 @@ #define _cffi_from_c_struct \ ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18]) #define _cffi_to_c_wchar_t \ - ((wchar_t(*)(PyObject *))_cffi_exports[19]) + ((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19]) #define _cffi_from_c_wchar_t \ - ((PyObject *(*)(wchar_t))_cffi_exports[20]) + ((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20]) #define _cffi_to_c_long_double \ ((long double(*)(PyObject *))_cffi_exports[21]) #define _cffi_to_c__Bool \ @@ -174,7 +174,11 @@ #define _CFFI_CPIDX 25 #define _cffi_call_python \ ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) -#define _CFFI_NUM_EXPORTS 26 +#define _cffi_to_c_wchar3216_t \ + ((int(*)(PyObject *))_cffi_exports[26]) +#define _cffi_from_c_wchar3216_t \ + ((PyObject *(*)(int))_cffi_exports[27]) +#define _CFFI_NUM_EXPORTS 28 struct _cffi_ctypedescr; @@ -215,6 +219,46 @@ return NULL; } + +#ifdef HAVE_WCHAR_H +typedef wchar_t _cffi_wchar_t; +#else +typedef uint16_t _cffi_wchar_t; /* 
same random pick as _cffi_backend.c */ +#endif + +_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 2) + return (uint16_t)_cffi_to_c_wchar_t(o); + else + return (uint16_t)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x) +{ + if (sizeof(_cffi_wchar_t) == 2) + return _cffi_from_c_wchar_t(x); + else + return _cffi_from_c_wchar3216_t(x); +} + +_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 4) + return (int)_cffi_to_c_wchar_t(o); + else + return (int)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(int x) +{ + if (sizeof(_cffi_wchar_t) == 4) + return _cffi_from_c_wchar_t(x); + else + return _cffi_from_c_wchar3216_t(x); +} + + /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -109,6 +109,8 @@ /********** CPython-specific section **********/ #ifndef PYPY_VERSION +#include "_cffi_errors.h" + #define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] @@ -220,8 +222,16 @@ /* Print as much information as potentially useful. 
Debugging load-time failures with embedding is not fun */ + PyObject *ecap; PyObject *exception, *v, *tb, *f, *modules, *mod; PyErr_Fetch(&exception, &v, &tb); + ecap = _cffi_start_error_capture(); + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString( + "Failed to initialize the Python-CFFI embedding logic:\n\n", f); + } + if (exception != NULL) { PyErr_NormalizeException(&exception, &v, &tb); PyErr_Display(exception, v, tb); @@ -230,7 +240,6 @@ Py_XDECREF(v); Py_XDECREF(tb); - f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME "\ncompiled with cffi version: 1.11.0" @@ -249,6 +258,7 @@ PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); PyFile_WriteString("\n\n", f); } + _cffi_stop_error_capture(ecap); } result = -1; goto done; diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -75,9 +75,10 @@ self._init_once_cache = {} self._cdef_version = None self._embedding = None + self._typecache = model.get_typecache(backend) if hasattr(backend, 'set_ffi'): backend.set_ffi(self) - for name in backend.__dict__: + for name in list(backend.__dict__): if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # @@ -764,7 +765,7 @@ if sys.platform != "win32": return backend.load_library(None, flags) name = "c" # Windows: load_library(None) fails, but this works - # (backward compatibility hack only) + # on Python 2 (backward compatibility hack only) first_error = None if '.' 
in name or '/' in name or os.sep in name: try: @@ -774,6 +775,9 @@ import ctypes.util path = ctypes.util.find_library(name) if path is None: + if name == "c" and sys.platform == "win32" and sys.version_info >= (3,): + raise OSError("dlopen(None) cannot work on Windows for Python 3 " + "(see http://bugs.python.org/issue23606)") msg = ("ctypes.util.find_library() did not manage " "to locate a library called %r" % (name,)) if first_error is not None: diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py --- a/lib_pypy/cffi/cffi_opcode.py +++ b/lib_pypy/cffi/cffi_opcode.py @@ -107,9 +107,10 @@ PRIM_UINTMAX = 47 PRIM_FLOATCOMPLEX = 48 PRIM_DOUBLECOMPLEX = 49 +PRIM_CHAR16 = 50 +PRIM_CHAR32 = 51 - -_NUM_PRIM = 50 +_NUM_PRIM = 52 _UNKNOWN_PRIM = -1 _UNKNOWN_FLOAT_PRIM = -2 _UNKNOWN_LONG_DOUBLE = -3 @@ -135,6 +136,8 @@ 'double _Complex': PRIM_DOUBLECOMPLEX, '_Bool': PRIM_BOOL, 'wchar_t': PRIM_WCHAR, + 'char16_t': PRIM_CHAR16, + 'char32_t': PRIM_CHAR32, 'int8_t': PRIM_INT8, 'uint8_t': PRIM_UINT8, 'int16_t': PRIM_INT16, diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -122,6 +122,8 @@ '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', + 'char16_t': 'c', + 'char32_t': 'c', 'int8_t': 'i', 'uint8_t': 'i', 'int16_t': 'i', @@ -566,22 +568,26 @@ global_lock = allocate_lock() +_typecache_cffi_backend = weakref.WeakValueDictionary() + +def get_typecache(backend): + # returns _typecache_cffi_backend if backend is the _cffi_backend + # module, or type(backend).__typecache if backend is an instance of + # CTypesBackend (or some FakeBackend class during tests) + if isinstance(backend, types.ModuleType): + return _typecache_cffi_backend + with global_lock: + if not hasattr(type(backend), '__typecache'): + type(backend).__typecache = weakref.WeakValueDictionary() + return type(backend).__typecache def global_cache(srctype, ffi, funcname, *args, **kwds): key = 
kwds.pop('key', (funcname, args)) assert not kwds try: - return ffi._backend.__typecache[key] + return ffi._typecache[key] except KeyError: pass - except AttributeError: - # initialize the __typecache attribute, either at the module level - # if ffi._backend is a module, or at the class level if ffi._backend - # is some instance. - if isinstance(ffi._backend, types.ModuleType): - ffi._backend.__typecache = weakref.WeakValueDictionary() - else: - type(ffi._backend).__typecache = weakref.WeakValueDictionary() try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: @@ -589,7 +595,7 @@ # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves - cache = ffi._backend.__typecache + cache = ffi._typecache with global_lock: res1 = cache.get(key) if res1 is None: diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -81,8 +81,10 @@ #define _CFFI_PRIM_UINTMAX 47 #define _CFFI_PRIM_FLOATCOMPLEX 48 #define _CFFI_PRIM_DOUBLECOMPLEX 49 +#define _CFFI_PRIM_CHAR16 50 +#define _CFFI_PRIM_CHAR32 51 -#define _CFFI__NUM_PRIM 50 +#define _CFFI__NUM_PRIM 52 #define _CFFI__UNKNOWN_PRIM (-1) #define _CFFI__UNKNOWN_FLOAT_PRIM (-2) #define _CFFI__UNKNOWN_LONG_DOUBLE (-3) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -3,8 +3,9 @@ from .error import VerificationError from .cffi_opcode import * -VERSION = "0x2601" -VERSION_EMBEDDED = "0x2701" +VERSION_BASE = 0x2601 +VERSION_EMBEDDED = 0x2701 +VERSION_CHAR16CHAR32 = 0x2801 class GlobalExpr: @@ -126,6 +127,10 @@ self.ffi = ffi self.module_name = module_name self.target_is_python = target_is_python + self._version = VERSION_BASE + + def needs_version(self, ver): + self._version = max(self._version, ver) def 
collect_type_table(self): self._typesdict = {} @@ -303,10 +308,10 @@ base_module_name,)) prnt('#endif') lines = self._rel_readlines('_embedding.h') + i = lines.index('#include "_cffi_errors.h"\n') + lines[i:i+1] = self._rel_readlines('_cffi_errors.h') prnt(''.join(lines)) - version = VERSION_EMBEDDED - else: - version = VERSION + self.needs_version(VERSION_EMBEDDED) # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') @@ -405,7 +410,7 @@ prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % version) + prnt(' p[0] = (const void *)0x%x;' % self._version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -423,21 +428,22 @@ prnt('PyMODINIT_FUNC') prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') - prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, version)) + prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') - prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, version)) + prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) prnt('}') prnt('#endif') prnt() prnt('#ifdef __GNUC__') prnt('# pragma GCC visibility pop') prnt('#endif') + self._version = None def _to_py(self, x): if isinstance(x, str): @@ -476,7 +482,8 @@ prnt('from %s import ffi as _ffi%d' % (included_module_name, i)) prnt() prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,)) - prnt(" _version = %s," % (VERSION,)) + prnt(" _version = 0x%x," % (self._version,)) + self._version = None # # the '_types' keyword argument self.cffi_types = tuple(self.cffi_types) # don't change any more @@ -515,8 +522,11 @@ # double' here, and _cffi_to_c_double would 
loose precision converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),) else: - converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + cname = tp.get_c_name('') + converter = '(%s)_cffi_to_c_%s' % (cname, tp.name.replace(' ', '_')) + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) errvalue = '-1' # elif isinstance(tp, model.PointerType): @@ -573,7 +583,10 @@ elif isinstance(tp, model.UnknownFloatType): return '_cffi_from_c_double(%s)' % (var,) elif tp.name != 'long double' and not tp.is_complex_type(): - return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) + cname = tp.name.replace(' ', '_') + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) + return '_cffi_from_c_%s(%s)' % (cname, var) else: return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -808,7 +808,8 @@ #include /* this block of #ifs should be kept exactly identical between - c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ #if defined(_MSC_VER) # include /* for alloca() */ # if _MSC_VER < 1600 /* MSVC < 2010 */ @@ -842,11 +843,13 @@ # include # endif # if _MSC_VER < 1800 /* MSVC < 2013 */ - typedef unsigned char _Bool; +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif # endif #else # include -# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) # include # endif #endif diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -627,7 +627,8 @@ #include /* XXX for ssize_t on some platforms */ /* this block of #ifs should be kept exactly identical between - 
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ #if defined(_MSC_VER) # include /* for alloca() */ # if _MSC_VER < 1600 /* MSVC < 2010 */ @@ -661,11 +662,13 @@ # include # endif # if _MSC_VER < 1800 /* MSVC < 2013 */ - typedef unsigned char _Bool; +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif # endif #else # include -# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) # include # endif #endif diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -268,12 +268,22 @@ assert abs(d) == 1 source = getcurrent() source.tempval = arg - if d > 0: - cando = self.balance < 0 - dir = d - else: - cando = self.balance > 0 - dir = 0 + while True: + if d > 0: + cando = self.balance < 0 + dir = d + else: + cando = self.balance > 0 + dir = 0 + + if cando and self.queue[0]._tasklet_killed: + # issue #2595: the tasklet was killed while waiting. + # drop that tasklet from consideration and try again. + self.balance += d + self.queue.popleft() + else: + # normal path + break if _channel_callback is not None: _channel_callback(self, source, dir, not cando) @@ -348,6 +358,8 @@ module. """ tempval = None + _tasklet_killed = False + def __new__(cls, func=None, label=''): res = coroutine.__new__(cls) res.label = label @@ -395,6 +407,7 @@ If the exception passes the toplevel frame of the tasklet, the tasklet will silently die. """ + self._tasklet_killed = True if not self.is_zombie: # Killing the tasklet by throwing TaskletExit exception. 
coroutine.kill(self) diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -93,7 +93,8 @@ libsqlite3 curses - libncurses + libncurses-dev (for PyPy2) + libncursesw-dev (for PyPy3) gdbm libgdbm-dev @@ -106,12 +107,13 @@ To run untranslated tests, you need the Boehm garbage collector libgc. -On Debian, this is the command to install all build-time dependencies:: +On Debian and Ubuntu, this is the command to install all build-time +dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ tk-dev libgc-dev python-cffi \ - liblzma-dev # For lzma on PyPy3. + liblzma-dev libncursesw-dev # these two only needed on PyPy3 On Fedora:: @@ -195,6 +197,29 @@ ``/tmp/usession-YOURNAME/build/``. You can then either move the file hierarchy or unpack the ``.tar.bz2`` at the correct place. +It is recommended to use package.py because custom scripts will +invariably become out-of-date. If you want to write custom scripts +anyway, note an easy-to-miss point: some modules are written with CFFI, +and require some compilation. If you install PyPy as root without +pre-compiling them, normal users will get errors: + +* PyPy 2.5.1 or earlier: normal users would see permission errors. + Installers need to run ``pypy -c "import gdbm"`` and other similar + commands at install time; the exact list is in `package.py`_. Users + seeing a broken installation of PyPy can fix it after-the-fact if they + have sudo rights, by running once e.g. ``sudo pypy -c "import gdbm``. + +* PyPy 2.6 and later: anyone would get ``ImportError: no module named + _gdbm_cffi``. Installers need to run ``pypy _gdbm_build.py`` in the + ``lib_pypy`` directory during the installation process (plus others; + see the exact list in `package.py`_). Users seeing a broken + installation of PyPy can fix it after-the-fact, by running ``pypy + /path/to/lib_pypy/_gdbm_build.py``. 
This command produces a file + called ``_gdbm_cffi.pypy-41.so`` locally, which is a C extension + module for PyPy. You can move it at any place where modules are + normally found: e.g. in your project's main directory, or in a + directory that you add to the env var ``PYTHONPATH``. + Installation ------------ diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -59,16 +59,16 @@ # General information about the project. project = u'PyPy' -copyright = u'2016, The PyPy Project' +copyright = u'2017, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '5.4' +version = '5.8' # The full version, including alpha/beta/rc tags. -release = '5.4.0' +release = '5.8.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,11 +6,11 @@ Armin Rigo Maciej Fijalkowski - Carl Friedrich Bolz + Carl Friedrich Bolz-Tereick Amaury Forgeot d'Arc Antonio Cuni + Matti Picus Samuele Pedroni - Matti Picus Ronan Lamy Alex Gaynor Philip Jenvey @@ -68,28 +68,28 @@ Vincent Legoll Michael Foord Stephan Diehl + Stefano Rivera Stefan Schwarzer Tomek Meka Valentino Volonghi - Stefano Rivera Patrick Maupin Devin Jeanpierre Bob Ippolito Bruno Gola David Malcolm Jean-Paul Calderone + Squeaky Edd Barrett - Squeaky Timo Paulssen Marius Gedminas Alexandre Fayolle Simon Burton Nicolas Truessel Martin Matusiak + Laurence Tratt Wenzhu Man Konstantin Lopuhin John Witulski - Laurence Tratt Greg Price Ivan Sichmann Freitas Dario Bertini @@ -116,13 +116,13 @@ Stian Andreassen Wanja Saatkamp Mike Blume + Joannah Nanjekye Gerald Klix Oscar Nierstrasz Rami Chowdhury Stefan H. 
Muller - Joannah Nanjekye + Tim Felgentreff Eugene Oden - Tim Felgentreff Jeff Terrace Henry Mason Vasily Kuznetsov @@ -131,11 +131,11 @@ Dusty Phillips Lukas Renggli Guenter Jantzen + Jasper Schulz Ned Batchelder Amit Regmi Anton Gulenko Sergey Matyunin - Jasper Schulz Andrew Chambers Nicolas Chauvat Andrew Durdin @@ -150,6 +150,7 @@ Gintautas Miliauskas Lucian Branescu Mihaila anatoly techtonik + Dodan Mihai Karl Bartel Gabriel Lavoie Jared Grubb @@ -187,12 +188,14 @@ Vaibhav Sood Reuben Cummings Attila Gobi + Alecsandru Patrascu Christopher Pope Tristan Arthur Christian Tismer Dan Stromberg Carl Meyer Florin Papa + Jens-Uwe Mager Valentina Mukhamedzhanova Stefano Parmesan touilleMan @@ -231,7 +234,6 @@ Dan Buch Lene Wagner Tomo Cocoa - Alecsandru Patrascu David Lievens Neil Blakey-Milner Henrik Vendelbo @@ -270,6 +272,7 @@ Anna Katrina Dominguez Kim Jin Su Amber Brown + Nate Bragg Ben Darnell Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -307,11 +310,13 @@ Jim Hunziker shoma hosaka Buck Golemon + Iraklis D. JohnDoe yrttyr Michael Chermside Anna Ravencroft remarkablerocket + Petre Vijiac Berker Peksag Christian Muirhead soareschen diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst --- a/pypy/doc/discussion/finalizer-order.rst +++ b/pypy/doc/discussion/finalizer-order.rst @@ -60,7 +60,7 @@ The interface for full finalizers is made with PyPy in mind, but should be generally useful. -The idea is that you subclass the ``rgc.FinalizerQueue`` class:: +The idea is that you subclass the ``rgc.FinalizerQueue`` class: * You must give a class-level attribute ``base_class``, which is the base class of all instances with a finalizer. (If you need diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst --- a/pypy/doc/discussion/rawrefcount.rst +++ b/pypy/doc/discussion/rawrefcount.rst @@ -68,10 +68,12 @@ and O = list of links created with rawrefcount.create_link_pyobj(). 
The PyPy objects in the list O are all W_CPyExtPlaceHolderObject: all the data is in the PyObjects, and all outsite references (if any) are -in C, as "PyObject *" fields. +in C, as ``PyObject *`` fields. So, during the collection we do this about P links: +.. code-block:: python + for (p, ob) in P: if ob->ob_refcnt != REFCNT_FROM_PYPY and ob->ob_refcnt != REFCNT_FROM_PYPY_LIGHT: @@ -80,6 +82,8 @@ At the end of the collection, the P and O links are both handled like this: +.. code-block:: python + for (p, ob) in P + O: if p is not surviving: # even if 'ob' might be surviving unlink p and ob diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -5,8 +5,8 @@ ++++++++++++++ We try to create a stable release a few times a year. These are released on -a branch named like release-2.x or release-4.x, and each release is tagged, -for instance release-4.0.1. +a branch named like release-pypy3.5-v2.x or release-pypy3.5-v4.x, and each +release is tagged, for instance release-pypy3.5-v4.0.1. After release, inevitably there are bug fixes. It is the responsibility of the commiter who fixes a bug to make sure this fix is on the release branch, @@ -33,7 +33,7 @@ * If needed, make a release branch * Bump the pypy version number in module/sys/version.py and in - module/cpyext/include/patchlevel.h and . The branch + module/cpyext/include/patchlevel.h and in doc/conf.py. The branch will capture the revision number of this change for the release. Some of the next updates may be done before or after branching; make diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -30,12 +30,22 @@ whatsnew-2.0.0-beta1.rst whatsnew-1.9.rst +CPython 3.5 compatible versions +------------------------------- + +.. 
toctree:: + + whatsnew-pypy3-head.rst + whatsnew-pypy3-5.8.0.rst + whatsnew-pypy3-5.7.0.rst + CPython 3.3 compatible versions ------------------------------- .. toctree:: whatsnew-pypy3-5.5.0.rst + whatsnew-pypy3-5.1.1-alpha1.rst CPython 3.2 compatible versions ------------------------------- diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -12,6 +12,7 @@ and using pip. .. _prebuilt-pypy: + Download a pre-built PyPy ~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -250,12 +250,12 @@ .. py:function:: newunicode(ustr) Creates a Unicode string from an rpython unicode string. - This method may disappear soon and be replaced by :py:function:`newutf8()`. + This method may disappear soon and be replaced by :py:function::`newutf8`. .. py:function:: newutf8(bytestr) Creates a Unicode string from an rpython byte string, decoded as - "utf-8-nosg". On PyPy3 it is the same as :py:function:`newtext()`. + "utf-8-nosg". On PyPy3 it is the same as :py:function::`newtext`. Many more space operations can be found in `pypy/interpeter/baseobjspace.py` and `pypy/objspace/std/objspace.py`. @@ -302,9 +302,9 @@ .. py:function:: unicode_w(w_x) - Takes an application level :py:class:`unicode` and return an + Takes an application level :py:class::`unicode` and return an interpreter-level unicode string. This method may disappear soon and - be replaced by :py:function:`text_w()`. + be replaced by :py:function::`text_w`. .. py:function:: float_w(w_x) diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -238,18 +238,17 @@ using more pypy-friendly technologies, e.g. cffi. 
Here is a partial list of good work that needs to be finished: -**matplotlib** https://github.com/mattip/matplotlib +**matplotlib** https://github.com/matplotlib/matplotlib - Status: the repo is an older version of matplotlib adapted to pypy and cpyext + TODO: the tkagg backend does not work, which makes tests fail on downstream + projects like Pandas, SciPy. It uses id(obj) as a c-pointer to obj in + tkagg.py, which requires refactoring - TODO: A suggested first step would be to merge the differences into From pypy.commits at gmail.com Thu Jul 20 10:48:10 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 20 Jul 2017 07:48:10 -0700 (PDT) Subject: [pypy-commit] pypy default: Mention that you can't compile new regexps in an RPython program, Message-ID: <5970c2aa.41921c0a.67d17.faaa@mx.google.com> Author: Armin Rigo Branch: Changeset: r91944:c40c4fbf020e Date: 2017-07-20 16:47 +0200 http://bitbucket.org/pypy/pypy/changeset/c40c4fbf020e/ Log: Mention that you can't compile new regexps in an RPython program, you can only use precompiled ones diff --git a/rpython/rlib/rsre/rpy/_sre.py b/rpython/rlib/rsre/rpy/_sre.py --- a/rpython/rlib/rsre/rpy/_sre.py +++ b/rpython/rlib/rsre/rpy/_sre.py @@ -16,6 +16,8 @@ def get_code(regexp, flags=0, allargs=False): + """NOT_RPYTHON: you can't compile new regexps in an RPython program, + you can only use precompiled ones""" from . 
import sre_compile try: sre_compile.compile(regexp, flags) From pypy.commits at gmail.com Thu Jul 20 12:03:01 2017 From: pypy.commits at gmail.com (rlamy) Date: Thu, 20 Jul 2017 09:03:01 -0700 (PDT) Subject: [pypy-commit] pypy multiphase: rename _testmultiphase to multiphase2, to avoid confusion with the CPython extension _testmultiphase Message-ID: <5970d435.6593df0a.3c68.5b26@mx.google.com> Author: Ronan Lamy Branch: multiphase Changeset: r91945:e2b16d8ae93f Date: 2017-07-20 18:02 +0200 http://bitbucket.org/pypy/pypy/changeset/e2b16d8ae93f/ Log: rename _testmultiphase to multiphase2, to avoid confusion with the CPython extension _testmultiphase diff --git a/pypy/module/cpyext/test/_testmultiphase.c b/pypy/module/cpyext/test/multiphase2.c rename from pypy/module/cpyext/test/_testmultiphase.c rename to pypy/module/cpyext/test/multiphase2.c --- a/pypy/module/cpyext/test/_testmultiphase.c +++ b/pypy/module/cpyext/test/multiphase2.c @@ -241,7 +241,7 @@ static PyModuleDef main_def = TEST_MODULE_DEF("main", main_slots, testexport_methods); PyMODINIT_FUNC -PyInit__testmultiphase(PyObject *spec) +PyInit_multiphase2(PyObject *spec) { return PyModuleDef_Init(&main_def); } diff --git a/pypy/module/cpyext/test/test_module.py b/pypy/module/cpyext/test/test_module.py --- a/pypy/module/cpyext/test/test_module.py +++ b/pypy/module/cpyext/test/test_module.py @@ -135,10 +135,10 @@ raises(SystemError, self.import_module, name='multiphase', body=body, init=init) - def test_module(self): + def test_basic(self): import sys from importlib import machinery, util - NAME = '_testmultiphase' + NAME = 'multiphase2' module = self.import_module(name=NAME) finder = machinery.FileFinder(None) spec = util.find_spec(NAME) @@ -152,7 +152,7 @@ def test_functionality(self): import types - NAME = '_testmultiphase' + NAME = 'multiphase2' module = self.import_module(name=NAME) assert isinstance(module, types.ModuleType) ex = module.Example() @@ -170,7 +170,7 @@ def test_reload(self): import importlib - 
NAME = '_testmultiphase' + NAME = 'multiphase2' module = self.import_module(name=NAME) ex_class = module.Example importlib.reload(module) From pypy.commits at gmail.com Fri Jul 21 03:01:29 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 21 Jul 2017 00:01:29 -0700 (PDT) Subject: [pypy-commit] cffi default: Issue #300 Message-ID: <5971a6c9.4aa8df0a.13b04.9e8f@mx.google.com> Author: Armin Rigo Branch: Changeset: r2995:f928bdbf5e1f Date: 2017-07-21 09:01 +0200 http://bitbucket.org/cffi/cffi/changeset/f928bdbf5e1f/ Log: Issue #300 Hopefully fix the remaining cases where a _Bool return value was not correctly converted to a Python bool, but still gave a Python int. diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3849,6 +3849,7 @@ assert result == samples for i in range(len(samples)): assert result[i] == p[i] and type(result[i]) is type(p[i]) + assert (type(result[i]) is bool) == (type(samples[i]) is bool) # BInt = new_primitive_type("int") py.test.raises(TypeError, unpack, p) diff --git a/cffi/_cffi_include.h b/cffi/_cffi_include.h --- a/cffi/_cffi_include.h +++ b/cffi/_cffi_include.h @@ -95,6 +95,7 @@ #define _cffi_from_c_ulong PyLong_FromUnsignedLong #define _cffi_from_c_longlong PyLong_FromLongLong #define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -581,7 +581,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.BasePrimitiveType): - if tp.is_integer_type(): + if tp.is_integer_type() and tp.name != '_Bool': return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif isinstance(tp, model.UnknownFloatType): return '_cffi_from_c_double(%s)' % (var,) diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -296,7 +296,7 
@@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): - if tp.is_integer_type(): + if tp.is_integer_type() and tp.name != '_Bool': return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) @@ -872,6 +872,7 @@ #define _cffi_from_c_ulong PyLong_FromUnsignedLong #define _cffi_from_c_longlong PyLong_FromLongLong #define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -40,6 +40,9 @@ * Progress on support for `callbacks in NetBSD`__. +* Functions returning booleans would in some case still return 0 or 1 + instead of False or True. Fixed. + .. __: https://bitbucket.org/cffi/cffi/issues/321/cffi-191-segmentation-fault-during-self diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -1449,20 +1449,30 @@ py.test.skip("_Bool not in MSVC") ffi = FFI() ffi.cdef("struct foo_s { _Bool x; };" - "_Bool foo(_Bool);") + "_Bool foo(_Bool); _Bool (*foop)(_Bool);") lib = ffi.verify(""" struct foo_s { _Bool x; }; int foo(int arg) { return !arg; } + _Bool _foofunc(_Bool x) { + return !x; + } + _Bool (*foop)(_Bool) = _foofunc; """) p = ffi.new("struct foo_s *") p.x = 1 - assert p.x == 1 + assert p.x is True py.test.raises(OverflowError, "p.x = -1") py.test.raises(TypeError, "p.x = 0.0") - assert lib.foo(1) == 0 - assert lib.foo(0) == 1 + assert lib.foop(1) is False + assert lib.foop(True) is False + assert lib.foop(0) is True + py.test.raises(OverflowError, lib.foop, 42) + py.test.raises(TypeError, lib.foop, 0.0) + assert lib.foo(1) is False + assert lib.foo(True) is False + assert lib.foo(0) is True 
py.test.raises(OverflowError, lib.foo, 42) py.test.raises(TypeError, lib.foo, 0.0) assert int(ffi.cast("_Bool", long(1))) == 1 diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1938,7 +1938,7 @@ ffi = FFI() ffi.cdef("bool f(void);") lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }") - assert lib.f() == 1 + assert lib.f() is True def test_bool_in_cpp_2(): ffi = FFI() diff --git a/testing/cffi1/test_verify1.py b/testing/cffi1/test_verify1.py --- a/testing/cffi1/test_verify1.py +++ b/testing/cffi1/test_verify1.py @@ -1418,20 +1418,30 @@ py.test.skip("_Bool not in MSVC") ffi = FFI() ffi.cdef("struct foo_s { _Bool x; };" - "_Bool foo(_Bool);") + "_Bool foo(_Bool); _Bool (*foop)(_Bool);") lib = ffi.verify(""" struct foo_s { _Bool x; }; int foo(int arg) { return !arg; } + _Bool _foofunc(_Bool x) { + return !x; + } + _Bool (*foop)(_Bool) = _foofunc; """) p = ffi.new("struct foo_s *") p.x = 1 - assert p.x == 1 + assert p.x is True py.test.raises(OverflowError, "p.x = -1") py.test.raises(TypeError, "p.x = 0.0") - assert lib.foo(1) == 0 - assert lib.foo(0) == 1 + assert lib.foop(1) is False + assert lib.foop(True) is False + assert lib.foop(0) is True + py.test.raises(OverflowError, lib.foop, 42) + py.test.raises(TypeError, lib.foop, 0.0) + assert lib.foo(1) is False + assert lib.foo(True) is False + assert lib.foo(0) is True py.test.raises(OverflowError, lib.foo, 42) py.test.raises(TypeError, lib.foo, 0.0) assert int(ffi.cast("_Bool", long(1))) == 1 From pypy.commits at gmail.com Fri Jul 21 04:19:12 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 21 Jul 2017 01:19:12 -0700 (PDT) Subject: [pypy-commit] pypy default: import cffi/f928bdbf5e1f Message-ID: <5971b900.2684df0a.fb663.92b8@mx.google.com> Author: Armin Rigo Branch: Changeset: r91946:2d76a6b9d1c2 Date: 2017-07-21 10:18 +0200 http://bitbucket.org/pypy/pypy/changeset/2d76a6b9d1c2/ Log: 
import cffi/f928bdbf5e1f diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -95,6 +95,7 @@ #define _cffi_from_c_ulong PyLong_FromUnsignedLong #define _cffi_from_c_longlong PyLong_FromLongLong #define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -1,7 +1,12 @@ /***** Support code for embedding *****/ -#if defined(_MSC_VER) +#ifdef __cplusplus +extern "C" { +#endif + + +#if defined(_WIN32) # define CFFI_DLLEXPORT __declspec(dllexport) #elif defined(__GNUC__) # define CFFI_DLLEXPORT __attribute__((visibility("default"))) @@ -525,3 +530,7 @@ #undef cffi_compare_and_swap #undef cffi_write_barrier #undef cffi_read_barrier + +#ifdef __cplusplus +} +#endif diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -412,6 +412,9 @@ prnt(' }') prnt(' p[0] = (const void *)0x%x;' % self._version) prnt(' p[1] = &_cffi_type_context;') + prnt('#if PY_MAJOR_VERSION >= 3') + prnt(' return NULL;') + prnt('#endif') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in # 'export_symbols', so instead of fighting it, just give up and @@ -578,7 +581,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.BasePrimitiveType): - if tp.is_integer_type(): + if tp.is_integer_type() and tp.name != '_Bool': return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif isinstance(tp, model.UnknownFloatType): return '_cffi_from_c_double(%s)' % (var,) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -296,7 +296,7 @@ def 
_convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): - if tp.is_integer_type(): + if tp.is_integer_type() and tp.name != '_Bool': return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) @@ -872,6 +872,7 @@ #define _cffi_from_c_ulong PyLong_FromUnsignedLong #define _cffi_from_c_longlong PyLong_FromLongLong #define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3838,6 +3838,7 @@ assert result == samples for i in range(len(samples)): assert result[i] == p[i] and type(result[i]) is type(p[i]) + assert (type(result[i]) is bool) == (type(samples[i]) is bool) # BInt = new_primitive_type("int") py.test.raises(TypeError, unpack, p) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -1450,20 +1450,30 @@ py.test.skip("_Bool not in MSVC") ffi = FFI() ffi.cdef("struct foo_s { _Bool x; };" - "_Bool foo(_Bool);") + "_Bool foo(_Bool); _Bool (*foop)(_Bool);") lib = ffi.verify(""" struct foo_s { _Bool x; }; int foo(int arg) { return !arg; } + _Bool _foofunc(_Bool x) { + return !x; + } + _Bool (*foop)(_Bool) = _foofunc; """) p = ffi.new("struct foo_s *") p.x = 1 - assert p.x == 1 + assert p.x is True py.test.raises(OverflowError, "p.x = -1") py.test.raises(TypeError, "p.x = 0.0") - assert lib.foo(1) == 0 - assert lib.foo(0) == 1 + assert lib.foop(1) is False + assert lib.foop(True) is False + assert 
lib.foop(0) is True + py.test.raises(OverflowError, lib.foop, 42) + py.test.raises(TypeError, lib.foop, 0.0) + assert lib.foo(1) is False + assert lib.foo(True) is False + assert lib.foo(0) is True py.test.raises(OverflowError, lib.foo, 42) py.test.raises(TypeError, lib.foo, 0.0) assert int(ffi.cast("_Bool", long(1))) == 1 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1939,7 +1939,7 @@ ffi = FFI() ffi.cdef("bool f(void);") lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }") - assert lib.f() == 1 + assert lib.f() is True def test_bool_in_cpp_2(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -1419,20 +1419,30 @@ py.test.skip("_Bool not in MSVC") ffi = FFI() ffi.cdef("struct foo_s { _Bool x; };" - "_Bool foo(_Bool);") + "_Bool foo(_Bool); _Bool (*foop)(_Bool);") lib = ffi.verify(""" struct foo_s { _Bool x; }; int foo(int arg) { return !arg; } + _Bool _foofunc(_Bool x) { + return !x; + } + _Bool (*foop)(_Bool) = _foofunc; """) p = ffi.new("struct foo_s *") p.x = 1 - assert p.x == 1 + assert p.x is True py.test.raises(OverflowError, "p.x = -1") py.test.raises(TypeError, "p.x = 0.0") - assert lib.foo(1) == 0 - assert lib.foo(0) == 1 + assert lib.foop(1) is False + assert lib.foop(True) is False + assert lib.foop(0) is True + py.test.raises(OverflowError, lib.foop, 42) + py.test.raises(TypeError, lib.foop, 0.0) + assert lib.foo(1) is False + assert lib.foo(True) is False + assert lib.foo(0) is True py.test.raises(OverflowError, lib.foo, 42) py.test.raises(TypeError, lib.foo, 0.0) assert 
int(ffi.cast("_Bool", long(1))) == 1 From pypy.commits at gmail.com Fri Jul 21 07:04:05 2017 From: pypy.commits at gmail.com (rlamy) Date: Fri, 21 Jul 2017 04:04:05 -0700 (PDT) Subject: [pypy-commit] pypy multiphase: fix test name (was shadowing the existing 'test_basic') Message-ID: <5971dfa5.9ea1df0a.dd7dc.2ec0@mx.google.com> Author: Ronan Lamy Branch: multiphase Changeset: r91947:bbea9a4a0c49 Date: 2017-07-21 13:03 +0200 http://bitbucket.org/pypy/pypy/changeset/bbea9a4a0c49/ Log: fix test name (was shadowing the existing 'test_basic') diff --git a/pypy/module/cpyext/test/test_module.py b/pypy/module/cpyext/test/test_module.py --- a/pypy/module/cpyext/test/test_module.py +++ b/pypy/module/cpyext/test/test_module.py @@ -135,7 +135,7 @@ raises(SystemError, self.import_module, name='multiphase', body=body, init=init) - def test_basic(self): + def test_multiphase2(self): import sys from importlib import machinery, util NAME = 'multiphase2' From pypy.commits at gmail.com Fri Jul 21 08:14:55 2017 From: pypy.commits at gmail.com (rlamy) Date: Fri, 21 Jul 2017 05:14:55 -0700 (PDT) Subject: [pypy-commit] pypy default: kill dead code Message-ID: <5971f03f.89d41c0a.91fcc.8b42@mx.google.com> Author: Ronan Lamy Branch: Changeset: r91948:a8c055058298 Date: 2017-07-21 14:14 +0200 http://bitbucket.org/pypy/pypy/changeset/a8c055058298/ Log: kill dead code diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -1,16 +1,12 @@ import sys -import weakref import pytest -from pypy.tool.cpyext.extbuild import ( - SystemCompilationInfo, HERE, get_sys_info_app) +from pypy.tool.cpyext.extbuild import SystemCompilationInfo, HERE from pypy.interpreter.gateway import unwrap_spec, interp2app -from rpython.rtyper.lltypesystem import lltype, ll2ctypes +from rpython.rtyper.lltypesystem import lltype from pypy.module.cpyext import api from pypy.module.cpyext.state 
import State -from pypy.module.cpyext.pyobject import Py_DecRef -from rpython.tool.identity_dict import identity_dict from rpython.tool import leakfinder from rpython.rlib import rawrefcount from rpython.tool.udir import udir @@ -76,13 +72,6 @@ def freeze_refcnts(self): rawrefcount._dont_free_any_more() - return #ZZZ - state = self.space.fromcache(RefcountState) - self.frozen_refcounts = {} - for w_obj, obj in state.py_objects_w2r.iteritems(): - self.frozen_refcounts[w_obj] = obj.c_ob_refcnt - #state.print_refcounts() - self.frozen_ll2callocations = set(ll2ctypes.ALLOCATED.values()) class LeakCheckingTest(object): """Base class for all cpyext tests.""" @@ -91,78 +80,14 @@ 'micronumpy', 'mmap' ]) - enable_leak_checking = True - @staticmethod def cleanup_references(space): - return #ZZZ - state = space.fromcache(RefcountState) - - import gc; gc.collect() - # Clear all lifelines, objects won't resurrect - for w_obj, obj in state.lifeline_dict._dict.items(): - if w_obj not in state.py_objects_w2r: - state.lifeline_dict.set(w_obj, None) - del obj - import gc; gc.collect() - - - for w_obj in state.non_heaptypes_w: - Py_DecRef(space, w_obj) - state.non_heaptypes_w[:] = [] - state.reset_borrowed_references() + return def check_and_print_leaks(self): rawrefcount._collect() - # check for sane refcnts - import gc - - if 1: #ZZZ not self.enable_leak_checking: - leakfinder.stop_tracking_allocations(check=False) - return False - - leaking = False - state = self.space.fromcache(RefcountState) - gc.collect() - lost_objects_w = identity_dict() - lost_objects_w.update((key, None) for key in self.frozen_refcounts.keys()) - - for w_obj, obj in state.py_objects_w2r.iteritems(): - base_refcnt = self.frozen_refcounts.get(w_obj) - delta = obj.c_ob_refcnt - if base_refcnt is not None: - delta -= base_refcnt - lost_objects_w.pop(w_obj) - if delta != 0: - leaking = True - print >>sys.stderr, "Leaking %r: %i references" % (w_obj, delta) - try: - weakref.ref(w_obj) - except TypeError: - 
lifeline = None - else: - lifeline = state.lifeline_dict.get(w_obj) - if lifeline is not None: - refcnt = lifeline.pyo.c_ob_refcnt - if refcnt > 0: - print >>sys.stderr, "\tThe object also held by C code." - else: - referrers_repr = [] - for o in gc.get_referrers(w_obj): - try: - repr_str = repr(o) - except TypeError as e: - repr_str = "%s (type of o is %s)" % (str(e), type(o)) - referrers_repr.append(repr_str) - referrers = ", ".join(referrers_repr) - print >>sys.stderr, "\tThe object is referenced by these objects:", \ - referrers - for w_obj in lost_objects_w: - print >>sys.stderr, "Lost object %r" % (w_obj, ) - leaking = True - # the actual low-level leak checking is done by pypy.tool.leakfinder, - # enabled automatically by pypy.conftest. - return leaking + leakfinder.stop_tracking_allocations(check=False) + return False class AppTestApi(LeakCheckingTest): def setup_class(cls): @@ -415,7 +340,6 @@ def test_export_docstring(self): - import sys init = """ if (Py_IsInitialized()) Py_InitModule("foo", methods); @@ -534,7 +458,6 @@ def test_export_function2(self): - import sys init = """ if (Py_IsInitialized()) Py_InitModule("foo", methods); From pypy.commits at gmail.com Fri Jul 21 08:51:02 2017 From: pypy.commits at gmail.com (rlamy) Date: Fri, 21 Jul 2017 05:51:02 -0700 (PDT) Subject: [pypy-commit] pypy default: cleanup cleanup code Message-ID: <5971f8b6.a4a6df0a.75422.c83c@mx.google.com> Author: Ronan Lamy Branch: Changeset: r91949:a22659423f20 Date: 2017-07-21 14:49 +0200 http://bitbucket.org/pypy/pypy/changeset/a22659423f20/ Log: cleanup cleanup code diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -64,14 +64,7 @@ except OperationError as e: print e.errorstr(self.space) raise - - try: - self.space.getexecutioncontext().cleanup_cpyext_state() - except AttributeError: - pass - - if self.check_and_print_leaks(): - assert False, "Test leaks 
or loses object(s)." + self.cleanup() @slot_function([PyObject], lltype.Void) def PyPy_GetWrapped(space, w_arg): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -80,14 +80,13 @@ 'micronumpy', 'mmap' ]) - @staticmethod - def cleanup_references(space): - return + def cleanup(self): + self.space.getexecutioncontext().cleanup_cpyext_state() + rawrefcount._collect() + self.space.user_del_action._run_finalizers() + leakfinder.stop_tracking_allocations(check=False) + assert not self.space.finalizer_queue.next_dead() - def check_and_print_leaks(self): - rawrefcount._collect() - leakfinder.stop_tracking_allocations(check=False) - return False class AppTestApi(LeakCheckingTest): def setup_class(cls): @@ -104,15 +103,7 @@ def teardown_method(self, meth): if self.runappdirect: return - self.space.getexecutioncontext().cleanup_cpyext_state() - self.cleanup_references(self.space) - # XXX: like AppTestCpythonExtensionBase.teardown_method: - # find out how to disable check_and_print_leaks() if the - # test failed - assert not self.check_and_print_leaks(), ( - "Test leaks or loses object(s). 
You should also check if " - "the test actually passed in the first place; if it failed " - "it is likely to reach this place.") + self.cleanup() @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_only_import(self): @@ -280,7 +271,6 @@ self.space.call_method(self.space.sys.get("stdout"), "flush") freeze_refcnts(self) - #self.check_and_print_leaks() def unimport_module(self, name): """ @@ -292,17 +282,12 @@ def teardown_method(self, func): if self.runappdirect: + self.w_debug_collect() return + debug_collect(self.space) for name in self.imported_module_names: self.unimport_module(name) - self.space.getexecutioncontext().cleanup_cpyext_state() - self.cleanup_references(self.space) - # XXX: find out how to disable check_and_print_leaks() if the - # test failed... - assert not self.check_and_print_leaks(), ( - "Test leaks or loses object(s). You should also check if " - "the test actually passed in the first place; if it failed " - "it is likely to reach this place.") + self.cleanup() class AppTestCpythonExtension(AppTestCpythonExtensionBase): From pypy.commits at gmail.com Fri Jul 21 09:52:37 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 21 Jul 2017 06:52:37 -0700 (PDT) Subject: [pypy-commit] pypy default: document need to consider changing SOABI when releasing Message-ID: <59720725.1aa4df0a.24aa4.6d74@mx.google.com> Author: Matti Picus Branch: Changeset: r91950:fcd9df9494da Date: 2017-07-21 16:51 +0300 http://bitbucket.org/pypy/pypy/changeset/fcd9df9494da/ Log: document need to consider changing SOABI when releasing diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -40,6 +40,9 @@ sure things are ported back to the trunk and to the branch as necessary. +* Maybe bump the SOABI number in module/imp/importing. This has many + implications, so make sure the PyPy community agrees to the change. 
+ * Update and write documentation * update pypy/doc/contributor.rst (and possibly LICENSE) From pypy.commits at gmail.com Fri Jul 21 11:35:57 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 21 Jul 2017 08:35:57 -0700 (PDT) Subject: [pypy-commit] cffi default: Mention the embedding problem on Debian that was discussed in issue #264. Message-ID: <59721f5d.c6071c0a.5f39b.baa5@mx.google.com> Author: Armin Rigo Branch: Changeset: r2996:2f3c1c595e96 Date: 2017-07-21 17:35 +0200 http://bitbucket.org/cffi/cffi/changeset/2f3c1c595e96/ Log: Mention the embedding problem on Debian that was discussed in issue #264. diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -359,6 +359,15 @@ -Wl,-rpath=\$ORIGIN``. From a Makefile, you need to say something like ``gcc -Wl,-rpath=\$$ORIGIN``. +* On some Linux distributions, notably Debian, the ``.so`` files of + CPython C extension modules may be compiled without saying that they + depend on ``libpythonX.Y.so``. This makes such Python systems + unsuitable for embedding if the embedder uses ``dlopen(..., + RTLD_LOCAL)``. You get the error ``undefined symbol: + PyExc_SystemError``. See `issue #264`__. + +.. 
__: https://bitbucket.org/cffi/cffi/issues/264/ + Using multiple CFFI-made DLLs ----------------------------- From pypy.commits at gmail.com Fri Jul 21 18:10:42 2017 From: pypy.commits at gmail.com (rlamy) Date: Fri, 21 Jul 2017 15:10:42 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Try to enable leakchecker in cpyext tests; pre-create some PyObjects for long-lived objects Message-ID: <59727be2.6596df0a.20289.4a8f@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91951:e5f5f5b6191c Date: 2017-07-22 00:09 +0200 http://bitbucket.org/pypy/pypy/changeset/e5f5f5b6191c/ Log: Try to enable leakchecker in cpyext tests; pre-create some PyObjects for long-lived objects diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -84,7 +84,7 @@ self.space.getexecutioncontext().cleanup_cpyext_state() rawrefcount._collect() self.space.user_del_action._run_finalizers() - leakfinder.stop_tracking_allocations(check=False) + leakfinder.stop_tracking_allocations(check=True) assert not self.space.finalizer_queue.next_dead() @@ -131,6 +131,18 @@ def debug_collect(space): rawrefcount._collect() +def preload(space, name): + from pypy.module.cpyext.pyobject import make_ref + if '.' 
not in name: + w_obj = space.builtin.getdictvalue(space, name) + else: + module, localname = name.rsplit('.', 1) + code = "(): import {module}; return {module}.{localname}" + code = code.format(**locals()) + w_obj = space.appexec([], code) + make_ref(space, w_obj) + + class AppTestCpythonExtensionBase(LeakCheckingTest): def setup_class(cls): @@ -144,6 +156,8 @@ # 'import os' to warm up reference counts w_import = space.builtin.getdictvalue(space, '__import__') space.call_function(w_import, space.wrap("os")) + for name in ['buffer', 'mmap.mmap']: + preload(space, name) #state = cls.space.fromcache(RefcountState) ZZZ #state.non_heaptypes_w[:] = [] cls.w_debug_collect = space.wrap(interp2app(debug_collect)) From pypy.commits at gmail.com Fri Jul 21 19:23:39 2017 From: pypy.commits at gmail.com (mjacob) Date: Fri, 21 Jul 2017 16:23:39 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Add temporary "solution" for failing 'import site'. Message-ID: <59728cfb.c8321c0a.bbed2.d24e@mx.google.com> Author: Manuel Jacob Branch: py3.6 Changeset: r91952:75d795b23931 Date: 2017-07-21 20:05 +0200 http://bitbucket.org/pypy/pypy/changeset/75d795b23931/ Log: Add temporary "solution" for failing 'import site'. diff --git a/lib-python/3/sysconfig.py b/lib-python/3/sysconfig.py --- a/lib-python/3/sysconfig.py +++ b/lib-python/3/sysconfig.py @@ -355,6 +355,8 @@ def _get_sysconfigdata_name(): + # FIXME: temporary hack for PyPy + return '_sysconfigdata' return os.environ.get('_PYTHON_SYSCONFIGDATA_NAME', '_sysconfigdata_{abi}_{platform}_{multiarch}'.format( abi=sys.abiflags, From pypy.commits at gmail.com Fri Jul 21 19:30:05 2017 From: pypy.commits at gmail.com (mjacob) Date: Fri, 21 Jul 2017 16:30:05 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Re-apply part of 46bb03e8. 
Message-ID: <59728e7d.968bdf0a.14859.de5c@mx.google.com> Author: Manuel Jacob Branch: py3.6 Changeset: r91953:c42f02b742b9 Date: 2017-07-22 01:28 +0200 http://bitbucket.org/pypy/pypy/changeset/c42f02b742b9/ Log: Re-apply part of 46bb03e8. diff --git a/lib-python/3/ctypes/__init__.py b/lib-python/3/ctypes/__init__.py --- a/lib-python/3/ctypes/__init__.py +++ b/lib-python/3/ctypes/__init__.py @@ -434,14 +434,6 @@ #pydll = LibraryLoader(PyDLL) if _os.name == "nt": - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - - -if _os.name == "nt": windll = LibraryLoader(WinDLL) oledll = LibraryLoader(OleDLL) From pypy.commits at gmail.com Fri Jul 21 20:00:20 2017 From: pypy.commits at gmail.com (mjacob) Date: Fri, 21 Jul 2017 17:00:20 -0700 (PDT) Subject: [pypy-commit] pypy py3.6: Test and fix: a global declaration after an assignment with the same name is now a SyntaxError instead of only a warning. Message-ID: <59729594.833f1c0a.e8a6b.c44e@mx.google.com> Author: Manuel Jacob Branch: py3.6 Changeset: r91954:1c770e1ebaa0 Date: 2017-07-22 01:59 +0200 http://bitbucket.org/pypy/pypy/changeset/1c770e1ebaa0/ Log: Test and fix: a global declaration after an assignment with the same name is now a SyntaxError instead of only a warning. 
diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -492,9 +492,7 @@ else: msg = "name '%s' is used prior to global declaration" % \ (name,) - misc.syntax_warning(self.space, msg, - self.compile_info.filename, - glob.lineno, glob.col_offset) + raise SyntaxError(msg, glob.lineno, glob.col_offset) self.note_symbol(name, SYM_GLOBAL) def visit_Nonlocal(self, nonl): @@ -519,9 +517,7 @@ else: msg = "name '%s' is used prior to nonlocal declaration" % \ (name,) - misc.syntax_warning(self.space, msg, - self.compile_info.filename, - nonl.lineno, nonl.col_offset) + raise SyntaxError(msg, nonl.lineno, nonl.col_offset) self.note_symbol(name, SYM_NONLOCAL) diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -318,6 +318,14 @@ x = g.lookup_role('x') assert x == symtable.SYM_GLOBAL + def test_global_after_assignment(self): + src = ("def f():\n" + " x = 1\n" + " global x\n") + exc = py.test.raises(SyntaxError, self.func_scope, src).value + assert exc.lineno == 3 + assert exc.msg == "name 'x' is assigned to before global declaration" + def test_nonlocal(self): src = """ x = 1 @@ -396,6 +404,14 @@ assert exc.msg == "name 'x' is parameter and nonlocal" assert exc.lineno == 4 + def test_nonlocal_after_assignment(self): + src = ("def f():\n" + " x = 1\n" + " nonlocal x\n") + exc = py.test.raises(SyntaxError, self.func_scope, src).value + assert exc.lineno == 3 + assert exc.msg == "name 'x' is assigned to before nonlocal declaration" + def test_optimization(self): assert not self.mod_scope("").can_be_optimized assert not self.class_scope("class x: pass").can_be_optimized From pypy.commits at gmail.com Sat Jul 22 13:30:42 2017 From: pypy.commits at gmail.com (rlamy) Date: 
Sat, 22 Jul 2017 10:30:42 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Temporarily disable tp_mro and tp_dict slots to avoid ref cycles that prevent types from being collected Message-ID: <59738bc2.5ca5df0a.2e8e4.5d57@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91955:0d18cd6d4afc Date: 2017-07-22 18:32 +0200 http://bitbucket.org/pypy/pypy/changeset/0d18cd6d4afc/ Log: Temporarily disable tp_mro and tp_dict slots to avoid ref cycles that prevent types from being collected diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -674,9 +674,9 @@ obj_pto = rffi.cast(PyTypeObjectPtr, obj) base_pyo = rffi.cast(PyObject, obj_pto.c_tp_base) Py_DecRef(space, obj_pto.c_tp_bases) - Py_DecRef(space, obj_pto.c_tp_mro) + #Py_DecRef(space, obj_pto.c_tp_mro) Py_DecRef(space, obj_pto.c_tp_cache) # let's do it like cpython - Py_DecRef(space, obj_pto.c_tp_dict) + #Py_DecRef(space, obj_pto.c_tp_dict) if obj_pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: heaptype = rffi.cast(PyHeapTypeObject, obj) Py_DecRef(space, heaptype.c_ht_name) @@ -928,7 +928,7 @@ """ Sets up other attributes, when the interpreter type has been created. 
""" - pto.c_tp_mro = make_ref(space, space.newtuple(w_obj.mro_w)) + #pto.c_tp_mro = make_ref(space, space.newtuple(w_obj.mro_w)) base = pto.c_tp_base if base: inherit_special(space, pto, base) @@ -951,7 +951,7 @@ w_dict = w_obj.getdict(space) # pass in the w_obj to convert any values that are # unbound GetSetProperty into bound PyGetSetDescrObject - pto.c_tp_dict = make_ref(space, w_dict, w_obj) + #pto.c_tp_dict = make_ref(space, w_dict, w_obj) @cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL) def PyType_IsSubtype(space, a, b): From pypy.commits at gmail.com Sat Jul 22 17:12:11 2017 From: pypy.commits at gmail.com (rlamy) Date: Sat, 22 Jul 2017 14:12:11 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: progress Message-ID: <5973bfab.cb141c0a.84cb7.2854@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91956:a733270b6626 Date: 2017-07-22 23:11 +0200 http://bitbucket.org/pypy/pypy/changeset/a733270b6626/ Log: progress diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -142,6 +142,12 @@ w_obj = space.appexec([], code) make_ref(space, w_obj) +def preload_expr(space, expr): + from pypy.module.cpyext.pyobject import make_ref + code = "(): return {}".format(expr) + w_obj = space.appexec([], code) + make_ref(space, w_obj) + class AppTestCpythonExtensionBase(LeakCheckingTest): @@ -158,6 +164,8 @@ space.call_function(w_import, space.wrap("os")) for name in ['buffer', 'mmap.mmap']: preload(space, name) + for expr in ['type(str.join)']: + preload_expr(space, expr) #state = cls.space.fromcache(RefcountState) ZZZ #state.non_heaptypes_w[:] = [] cls.w_debug_collect = space.wrap(interp2app(debug_collect)) From pypy.commits at gmail.com Sun Jul 23 03:39:15 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 23 Jul 2017 00:39:15 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Issue 
#2615 Message-ID: <597452a3.045e1c0a.f6bb3.28ba@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91957:a92ebe04ee55 Date: 2017-07-23 09:38 +0200 http://bitbucket.org/pypy/pypy/changeset/a92ebe04ee55/ Log: Issue #2615 Don't use a static buffer. Corruption in a multithreaded environment! diff --git a/lib_pypy/_cffi_ssl/_stdssl/certificate.py b/lib_pypy/_cffi_ssl/_stdssl/certificate.py --- a/lib_pypy/_cffi_ssl/_stdssl/certificate.py +++ b/lib_pypy/_cffi_ssl/_stdssl/certificate.py @@ -173,14 +173,13 @@ return tuple(dn) -STATIC_BIO_BUF = ffi.new("char[]", 2048) - def _bio_get_str(biobuf): - length = lib.BIO_gets(biobuf, STATIC_BIO_BUF, len(STATIC_BIO_BUF)-1) + bio_buf = ffi.new("char[]", 2048) + length = lib.BIO_gets(biobuf, bio_buf, len(bio_buf)-1) if length < 0: if biobuf: lib.BIO_free(biobuf) raise ssl_error(None) - return _str_with_len(STATIC_BIO_BUF, length) + return _str_with_len(bio_buf, length) def _decode_certificate(certificate): retval = {} From pypy.commits at gmail.com Mon Jul 24 12:02:50 2017 From: pypy.commits at gmail.com (arigo) Date: Mon, 24 Jul 2017 09:02:50 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2612 Message-ID: <59761a2a.87da1c0a.c5b30.883e@mx.google.com> Author: Armin Rigo Branch: Changeset: r91958:3c4fb99e0c59 Date: 2017-07-24 10:07 +0200 http://bitbucket.org/pypy/pypy/changeset/3c4fb99e0c59/ Log: Issue #2612 Simplify gc.get_referrers(). Before, it wasn't guaranteed to return the opposite result as gc.get_referents(). diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py --- a/pypy/module/gc/referents.py +++ b/pypy/module/gc/referents.py @@ -47,57 +47,6 @@ # ____________________________________________________________ -class PathEntry(object): - # PathEntries are nodes of a complete tree of all objects, but - # built lazily (there is only one branch alive at any time). - # Each node has a 'gcref' and the list of referents from this gcref. 
- def __init__(self, prev, gcref, referents): - self.prev = prev - self.gcref = gcref - self.referents = referents - self.remaining = len(referents) - - def get_most_recent_w_obj(self): - entry = self - while entry is not None: - if entry.gcref: - w_obj = try_cast_gcref_to_w_root(entry.gcref) - if w_obj is not None: - return w_obj - entry = entry.prev - return None - -def do_get_referrers(w_arg): - result_w = [] - gcarg = rgc.cast_instance_to_gcref(w_arg) - roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] - head = PathEntry(None, rgc.NULL_GCREF, roots) - while True: - head.remaining -= 1 - if head.remaining >= 0: - gcref = head.referents[head.remaining] - if not rgc.get_gcflag_extra(gcref): - # not visited so far - if gcref == gcarg: - w_obj = head.get_most_recent_w_obj() - if w_obj is not None: - result_w.append(w_obj) # found! - rgc.toggle_gcflag_extra(gcref) # toggle twice - rgc.toggle_gcflag_extra(gcref) - head = PathEntry(head, gcref, rgc.get_rpy_referents(gcref)) - else: - # no more referents to visit - head = head.prev - if head is None: - break - # done. Clear flags carefully - rgc.toggle_gcflag_extra(gcarg) - rgc.clear_gcflag_extra(roots) - rgc.clear_gcflag_extra([gcarg]) - return result_w - -# ____________________________________________________________ - def _list_w_obj_referents(gcref, result_w): # Get all W_Root reachable directly from gcref, and add them to # the list 'result_w'. @@ -184,9 +133,22 @@ """Return the list of objects that directly refer to any of objs.""" if not rgc.has_gcflag_extra(): raise missing_operation(space) + # xxx uses a lot of memory to make the list of all W_Root objects, + # but it's simpler this way and more correct than the previous + # version of this code (issue #2612). It is potentially very slow + # because each of the n calls to _list_w_obj_referents() could take + # O(n) time as well, in theory, but I hope in practice the whole + # thing takes much less than O(n^2). 
We could re-add an algorithm + # that visits most objects only once, if needed... + all_objects_w = rgc.do_get_objects(try_cast_gcref_to_w_root) result_w = [] - for w_arg in args_w: - result_w += do_get_referrers(w_arg) + for w_obj in all_objects_w: + refs_w = [] + gcref = rgc.cast_instance_to_gcref(w_obj) + _list_w_obj_referents(gcref, refs_w) + for w_arg in args_w: + if w_arg in refs_w: + result_w.append(w_obj) rgc.assert_no_more_gcflags() return space.newlist(result_w) diff --git a/pypy/module/gc/test/test_referents.py b/pypy/module/gc/test/test_referents.py --- a/pypy/module/gc/test/test_referents.py +++ b/pypy/module/gc/test/test_referents.py @@ -116,3 +116,37 @@ break # found else: assert 0, "the tuple (7,) is not found as gc.get_referrers(7)" + + +class AppTestReferentsMore(object): + + def setup_class(cls): + from rpython.rlib import rgc + cls._backup = [rgc.get_rpy_roots] + l4 = cls.space.newlist([]) + cls.ALL_ROOTS = [l4] + cls.w_ALL_ROOTS = cls.space.newlist(cls.ALL_ROOTS) + rgc.get_rpy_roots = lambda: ( + map(rgc._GcRef, cls.ALL_ROOTS) + [rgc.NULL_GCREF]*2) + cls.w_runappdirect = cls.space.wrap(option.runappdirect) + + def teardown_class(cls): + from rpython.rlib import rgc + rgc.get_rpy_roots = cls._backup[0] + + def test_get_referrers(self): + import gc + class A(object): + pass + a = A() + if not self.runappdirect: + l4 = self.ALL_ROOTS[0] + l4.append(a) # add 'a' to the list which is in roots + lst = gc.get_referrers(A) + assert a in lst + lst = gc.get_referrers(A) + assert a in lst + lst = gc.get_referrers(A) + assert a in lst + lst = gc.get_referrers(A) + assert a in lst From pypy.commits at gmail.com Mon Jul 24 12:32:41 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 24 Jul 2017 09:32:41 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Filter out interned string from leakfinder output Message-ID: <59762129.500a1c0a.5bf24.6991@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91960:683cc621ceca Date: 
2017-07-24 12:49 +0200 http://bitbucket.org/pypy/pypy/changeset/683cc621ceca/ Log: Filter out interned string from leakfinder output diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -4,6 +4,7 @@ from pypy.tool.cpyext.extbuild import SystemCompilationInfo, HERE from pypy.interpreter.gateway import unwrap_spec, interp2app +from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import lltype from pypy.module.cpyext import api from pypy.module.cpyext.state import State @@ -73,6 +74,25 @@ def freeze_refcnts(self): rawrefcount._dont_free_any_more() +def is_interned_string(space, w_obj): + try: + s = space.str_w(w_obj) + except OperationError: + return False + return space.is_interned_str(s) + +def is_allowed_to_leak(space, obj): + from pypy.module.cpyext.pyobject import from_ref + from pypy.module.cpyext.api import cts + try: + w_obj = from_ref(space, cts.cast('PyObject*', obj._as_ptr())) + except: + return False + # It's OK to "leak" some interned strings: if the pyobj is created by + # the test, but the w_obj is referred to from elsewhere. 
+ return is_interned_string(space, w_obj) + + class LeakCheckingTest(object): """Base class for all cpyext tests.""" spaceconfig = dict(usemodules=['cpyext', 'thread', 'struct', 'array', @@ -84,7 +104,16 @@ self.space.getexecutioncontext().cleanup_cpyext_state() rawrefcount._collect() self.space.user_del_action._run_finalizers() - leakfinder.stop_tracking_allocations(check=True) + try: + leakfinder.stop_tracking_allocations(check=True) + except leakfinder.MallocMismatch as e: + result = e.args[0] + filtered_result = {} + for obj in result: + if not is_allowed_to_leak(self.space, obj): + filtered_result[obj] = result[obj] + if filtered_result: + raise leakfinder.MallocMismatch(filtered_result) assert not self.space.finalizer_queue.next_dead() From pypy.commits at gmail.com Mon Jul 24 12:32:43 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 24 Jul 2017 09:32:43 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Fix leaking pybuf.c_format Message-ID: <5976212b.308fdf0a.454f4.d4cc@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91961:e698bd62fa9d Date: 2017-07-24 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/e698bd62fa9d/ Log: Fix leaking pybuf.c_format diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -82,11 +82,10 @@ for i in range(self.ndim): pybuf.c_shape[i] = self.shape[i] pybuf.c_strides[i] = self.strides[i] - if self.format: - pybuf.c_format = rffi.str2charp(self.format) - else: - pybuf.c_format = rffi.str2charp("B") - generic_cpy_call(self.space, func_target, self.pyobj, pybuf) + with rffi.scoped_str2charp( + self.format if self.format else "B") as fmt: + pybuf.c_format = fmt + generic_cpy_call(self.space, func_target, self.pyobj, pybuf) decref(self.space, self.pyobj) self.pyobj = lltype.nullptr(PyObject.TO) else: @@ -167,6 +166,8 @@ sizep[0] = size return 0 +DEFAULT_FMT = rffi.str2charp("B") + 
@cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t, lltype.Signed, lltype.Signed], rffi.INT, error=-1) def PyBuffer_FillInfo(space, view, obj, buf, length, readonly, flags): @@ -187,7 +188,8 @@ rffi.setintfield(view, 'c_ndim', 1) view.c_format = lltype.nullptr(rffi.CCHARP.TO) if (flags & PyBUF_FORMAT) == PyBUF_FORMAT: - view.c_format = rffi.str2charp("B") + # NB: this needs to be a static string, because nothing frees it + view.c_format = DEFAULT_FMT view.c_shape = lltype.nullptr(Py_ssize_tP.TO) if (flags & PyBUF_ND) == PyBUF_ND: view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape) From pypy.commits at gmail.com Mon Jul 24 12:32:39 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 24 Jul 2017 09:32:39 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Mark the tp_bases of non-heap types as immortal Message-ID: <59762127.d0141c0a.e04af.b54c@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91959:20521e5ee07a Date: 2017-07-24 00:07 +0200 http://bitbucket.org/pypy/pypy/changeset/20521e5ee07a/ Log: Mark the tp_bases of non-heap types as immortal diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -29,7 +29,7 @@ from pypy.module.cpyext.typeobject import subtype_dealloc return subtype_dealloc.api_func - def allocate(self, space, w_type, itemcount=0): + def allocate(self, space, w_type, itemcount=0, immortal=False): # typically called from PyType_GenericAlloc via typedescr.allocate # this returns a PyObject with ob_refcnt == 1. 
@@ -50,7 +50,7 @@ assert size >= rffi.sizeof(PyObject.TO) buf = lltype.malloc(rffi.VOIDP.TO, size, flavor='raw', zero=True, - add_memory_pressure=True) + add_memory_pressure=True, immortal=immortal) pyobj = rffi.cast(PyObject, buf) if pytype.c_tp_itemsize: pyvarobj = rffi.cast(PyVarObject, pyobj) @@ -102,7 +102,7 @@ basestruct = tp_basestruct if tp_alloc: - def allocate(self, space, w_type, itemcount=0): + def allocate(self, space, w_type, itemcount=0, immortal=False): return tp_alloc(space, w_type, itemcount) if tp_dealloc: @@ -151,7 +151,7 @@ class InvalidPointerException(Exception): pass -def create_ref(space, w_obj, w_userdata=None): +def create_ref(space, w_obj, w_userdata=None, immortal=False): """ Allocates a PyObject, and fills its fields with info from the given interpreter object. @@ -163,7 +163,7 @@ itemcount = space.len_w(w_obj) # PyBytesObject and subclasses else: itemcount = 0 - py_obj = typedescr.allocate(space, w_type, itemcount=itemcount) + py_obj = typedescr.allocate(space, w_type, itemcount=itemcount, immortal=immortal) track_reference(space, py_obj, w_obj) # # py_obj.c_ob_refcnt should be exactly REFCNT_FROM_PYPY + 1 here, @@ -227,7 +227,7 @@ assert isinstance(w_type, W_TypeObject) return get_typedescr(w_type.layout.typedef).realize(space, ref) -def as_pyobj(space, w_obj, w_userdata=None): +def as_pyobj(space, w_obj, w_userdata=None, immortal=False): """ Returns a 'PyObject *' representing the given intepreter object. 
This doesn't give a new reference, but the returned 'PyObject *' @@ -239,7 +239,7 @@ assert not is_pyobj(w_obj) py_obj = rawrefcount.from_obj(PyObject, w_obj) if not py_obj: - py_obj = create_ref(space, w_obj, w_userdata) + py_obj = create_ref(space, w_obj, w_userdata, immortal=immortal) return py_obj else: return lltype.nullptr(PyObject.TO) @@ -270,7 +270,7 @@ return hop.inputconst(lltype.Bool, hop.s_result.const) @specialize.ll() -def make_ref(space, obj, w_userdata=None): +def make_ref(space, obj, w_userdata=None, immortal=False): """Increment the reference counter of the PyObject and return it. Can be called with either a PyObject or a W_Root. """ @@ -278,7 +278,7 @@ pyobj = rffi.cast(PyObject, obj) at_least = 1 else: - pyobj = as_pyobj(space, obj, w_userdata) + pyobj = as_pyobj(space, obj, w_userdata, immortal=immortal) at_least = rawrefcount.REFCNT_FROM_PYPY if pyobj: assert pyobj.c_ob_refcnt >= at_least diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -922,7 +922,9 @@ bases_w = [] else: bases_w = [from_ref(space, base_pyo)] - pto.c_tp_bases = make_ref(space, space.newtuple(bases_w)) + is_heaptype = bool(pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE) + pto.c_tp_bases = make_ref(space, space.newtuple(bases_w), + immortal=not is_heaptype) def finish_type_2(space, pto, w_obj): """ @@ -948,7 +950,7 @@ if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) - w_dict = w_obj.getdict(space) + #w_dict = w_obj.getdict(space) # pass in the w_obj to convert any values that are # unbound GetSetProperty into bound PyGetSetDescrObject #pto.c_tp_dict = make_ref(space, w_dict, w_obj) From pypy.commits at gmail.com Tue Jul 25 05:59:28 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 25 Jul 2017 02:59:28 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Tweak CPyBuffer.releasebuffer() to make ll2ctypes happy Message-ID: 
<59771680.d0141c0a.e04af.7828@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91962:c99247c01177 Date: 2017-07-25 11:58 +0200 http://bitbucket.org/pypy/pypy/changeset/c99247c01177/ Log: Tweak CPyBuffer.releasebuffer() to make ll2ctypes happy diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -605,6 +605,7 @@ e.write_unraisable(space, where, w_obj) e.clear(space) # break up reference cycles else: + raise addrstring = w_obj.getaddrstring(space) msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % ( str(e), where, space.type(w_obj).name, addrstring)) @@ -615,7 +616,7 @@ def make_finalizer_queue(W_Root, space): """Make a FinalizerQueue subclass which responds to GC finalizer events by 'firing' the UserDelAction class above. It does not - directly fetches the objects to finalize at all; they stay in the + directly fetches the objects to finalize at all; they stay in the GC-managed queue, and will only be fetched by UserDelAction (between bytecodes).""" diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -73,19 +73,24 @@ if self.needs_decref: if self.releasebufferproc: func_target = rffi.cast(releasebufferproc, self.releasebufferproc) - with lltype.scoped_alloc(Py_buffer) as pybuf: - pybuf.c_buf = self.ptr - pybuf.c_len = self.size - pybuf.c_ndim = cts.cast('int', self.ndim) - pybuf.c_shape = cts.cast('Py_ssize_t*', pybuf.c__shape) - pybuf.c_strides = cts.cast('Py_ssize_t*', pybuf.c__strides) - for i in range(self.ndim): - pybuf.c_shape[i] = self.shape[i] - pybuf.c_strides[i] = self.strides[i] - with rffi.scoped_str2charp( - self.format if self.format else "B") as fmt: - pybuf.c_format = fmt - generic_cpy_call(self.space, func_target, self.pyobj, pybuf) + size = rffi.sizeof(cts.gettype('Py_buffer')) + pybuf = 
lltype.malloc(rffi.VOIDP.TO, size, flavor='raw', zero=True) + pybuf = cts.cast('Py_buffer*', pybuf) + pybuf.c_buf = self.ptr + pybuf.c_len = self.size + pybuf.c_ndim = cts.cast('int', self.ndim) + pybuf.c_shape = cts.cast('Py_ssize_t*', pybuf.c__shape) + pybuf.c_strides = cts.cast('Py_ssize_t*', pybuf.c__strides) + for i in range(self.ndim): + pybuf.c_shape[i] = self.shape[i] + pybuf.c_strides[i] = self.strides[i] + fmt = rffi.str2charp(self.format if self.format else "B") + try: + pybuf.c_format = fmt + generic_cpy_call(self.space, func_target, self.pyobj, pybuf) + finally: + lltype.free(fmt, flavor='raw') + lltype.free(pybuf, flavor='raw') decref(self.space, self.pyobj) self.pyobj = lltype.nullptr(PyObject.TO) else: diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -109,9 +109,9 @@ except leakfinder.MallocMismatch as e: result = e.args[0] filtered_result = {} - for obj in result: + for obj, value in result.iteritems(): if not is_allowed_to_leak(self.space, obj): - filtered_result[obj] = result[obj] + filtered_result[obj] = value if filtered_result: raise leakfinder.MallocMismatch(filtered_result) assert not self.space.finalizer_queue.next_dead() diff --git a/rpython/tool/leakfinder.py b/rpython/tool/leakfinder.py --- a/rpython/tool/leakfinder.py +++ b/rpython/tool/leakfinder.py @@ -6,6 +6,7 @@ # So far, this is used for lltype.malloc(flavor='raw'). 
TRACK_ALLOCATIONS = False ALLOCATED = {} +TB_LINES = 76 class MallocMismatch(Exception): def __str__(self): @@ -13,8 +14,8 @@ dict2 = {} for obj, traceback in dict.items(): traceback = traceback.splitlines() - if len(traceback) > 8: - traceback = [' ...'] + traceback[-6:] + if len(traceback) > TB_LINES + 2: + traceback = [' ...'] + traceback[-TB_LINES:] traceback = '\n'.join(traceback) dict2.setdefault(traceback, []) dict2[traceback].append(obj) @@ -58,7 +59,7 @@ if TRACK_ALLOCATIONS: frame = sys._getframe(framedepth) sio = cStringIO.StringIO() - traceback.print_stack(frame, limit=10, file=sio) + traceback.print_stack(frame, limit=40, file=sio) tb = sio.getvalue() ALLOCATED[obj] = tb From pypy.commits at gmail.com Tue Jul 25 06:05:56 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 25 Jul 2017 03:05:56 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: revert debugging code committed by mistake Message-ID: <59771804.13421c0a.6ea4d.2c0f@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91963:31b9aeebd66e Date: 2017-07-25 12:03 +0200 http://bitbucket.org/pypy/pypy/changeset/31b9aeebd66e/ Log: revert debugging code committed by mistake diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -605,7 +605,6 @@ e.write_unraisable(space, where, w_obj) e.clear(space) # break up reference cycles else: - raise addrstring = w_obj.getaddrstring(space) msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % ( str(e), where, space.type(w_obj).name, addrstring)) @@ -616,7 +615,7 @@ def make_finalizer_queue(W_Root, space): """Make a FinalizerQueue subclass which responds to GC finalizer events by 'firing' the UserDelAction class above. 
It does not - directly fetches the objects to finalize at all; they stay in the + directly fetches the objects to finalize at all; they stay in the GC-managed queue, and will only be fetched by UserDelAction (between bytecodes).""" diff --git a/rpython/tool/leakfinder.py b/rpython/tool/leakfinder.py --- a/rpython/tool/leakfinder.py +++ b/rpython/tool/leakfinder.py @@ -6,7 +6,6 @@ # So far, this is used for lltype.malloc(flavor='raw'). TRACK_ALLOCATIONS = False ALLOCATED = {} -TB_LINES = 76 class MallocMismatch(Exception): def __str__(self): @@ -14,8 +13,8 @@ dict2 = {} for obj, traceback in dict.items(): traceback = traceback.splitlines() - if len(traceback) > TB_LINES + 2: - traceback = [' ...'] + traceback[-TB_LINES:] + if len(traceback) > 8: + traceback = [' ...'] + traceback[-6:] traceback = '\n'.join(traceback) dict2.setdefault(traceback, []) dict2[traceback].append(obj) @@ -59,7 +58,7 @@ if TRACK_ALLOCATIONS: frame = sys._getframe(framedepth) sio = cStringIO.StringIO() - traceback.print_stack(frame, limit=40, file=sio) + traceback.print_stack(frame, limit=10, file=sio) tb = sio.getvalue() ALLOCATED[obj] = tb From pypy.commits at gmail.com Tue Jul 25 08:10:01 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 25 Jul 2017 05:10:01 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Be more careful with refcounts in array.c Message-ID: <59773519.97a9df0a.40b5c.5cfe@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91965:438c0c9af393 Date: 2017-07-25 14:09 +0200 http://bitbucket.org/pypy/pypy/changeset/438c0c9af393/ Log: Be more careful with refcounts in array.c diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -48,7 +48,7 @@ m as the message text. 
If the conversion otherwise, fails, reraise the original exception""" if isinstance(w_obj, W_ListObject): - # make sure we can return a borrowed obj from PySequence_Fast_GET_ITEM + # make sure we can return a borrowed obj from PySequence_Fast_GET_ITEM w_obj.convert_to_cpy_strategy(space) return w_obj try: @@ -313,7 +313,7 @@ self) w_clone.switch_to_object_strategy() return w_clone - + def copy_into(self, w_list, w_other): w_list.switch_to_object_strategy() w_list.strategy.copy_into(w_list, w_other) @@ -378,7 +378,7 @@ def is_empty_strategy(self): return False - + PyObjectList = lltype.Ptr(lltype.Array(PyObject, hints={'nolength': True})) diff --git a/pypy/module/cpyext/test/array.c b/pypy/module/cpyext/test/array.c --- a/pypy/module/cpyext/test/array.c +++ b/pypy/module/cpyext/test/array.c @@ -1867,6 +1867,7 @@ int n = PyList_Size(obj1); PyObject *v = getarrayitem(obj2, 0); int i = ((PyIntObject*)v)->ob_ival; + Py_DECREF(v); PyObject * ret = PyList_New(n*i); for (ii = 0; ii < i; ii++) for (nn = 0; nn < n; nn++) @@ -1883,6 +1884,7 @@ int n = PyList_Size(obj2); PyObject *v = getarrayitem(obj1, 0); int i = ((PyIntObject*)v)->ob_ival; + Py_DECREF(v); PyObject * ret = PyList_New(n*i); for (ii = 0; ii < i; ii++) for (nn = 0; nn < n; nn++) @@ -1919,6 +1921,7 @@ int n = PyList_Size(obj1); PyObject *v = getarrayitem(obj2, 0); int i = ((PyIntObject*)v)->ob_ival; + Py_DECREF(v); PyObject * ret = PyList_New(n); for (nn = 0; nn < n; nn++) { @@ -1926,7 +1929,10 @@ if (PyInt_Check(v)) PyList_SetItem(ret, nn, PyLong_FromLong(i * ((PyIntObject*)v)->ob_ival)); else + { + Py_INCREF(v); PyList_SetItem(ret, nn, v); + } } return ret; } @@ -1936,6 +1942,7 @@ int n = PyList_Size(obj2); PyObject *v = getarrayitem(obj1, 0); int i = ((PyIntObject*)v)->ob_ival; + Py_DECREF(v); PyObject * ret = PyList_New(n); for (nn = 0; nn < n; nn++) { @@ -1943,7 +1950,10 @@ if (PyInt_Check(v)) PyList_SetItem(ret, nn, PyLong_FromLong(i * ((PyIntObject*)v)->ob_ival)); else + { + Py_INCREF(v); 
PyList_SetItem(ret, nn, v); + } } return ret; } From pypy.commits at gmail.com Tue Jul 25 08:09:59 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 25 Jul 2017 05:09:59 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Filter out C functions Message-ID: <59773517.c90b1c0a.7a3f3.5f75@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91964:5bba19e669b0 Date: 2017-07-25 13:30 +0200 http://bitbucket.org/pypy/pypy/changeset/5bba19e669b0/ Log: Filter out C functions diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -84,10 +84,13 @@ def is_allowed_to_leak(space, obj): from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.api import cts + from pypy.module.cpyext.methodobject import W_PyCFunctionObject try: w_obj = from_ref(space, cts.cast('PyObject*', obj._as_ptr())) except: return False + if isinstance(w_obj, W_PyCFunctionObject): + return True # It's OK to "leak" some interned strings: if the pyobj is created by # the test, but the w_obj is referred to from elsewhere. return is_interned_string(space, w_obj) From pypy.commits at gmail.com Tue Jul 25 09:50:01 2017 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 25 Jul 2017 06:50:01 -0700 (PDT) Subject: [pypy-commit] pypy default: remove old files Message-ID: <59774c89.c2b81c0a.845ff.7d95@mx.google.com> Author: Richard Plangger Branch: Changeset: r91966:cb8f734c831d Date: 2017-07-23 16:27 -0400 http://bitbucket.org/pypy/pypy/changeset/cb8f734c831d/ Log: remove old files diff --git a/rpython/rlib/rvmprof/src/shared/rss_darwin.h b/rpython/rlib/rvmprof/src/shared/rss_darwin.h deleted file mode 100644 --- a/rpython/rlib/rvmprof/src/shared/rss_darwin.h +++ /dev/null @@ -1,31 +0,0 @@ -/* On OS X we can get RSS using the Mach API. 
*/ -#include -#include -#include -#include - -static mach_port_t mach_task; - -static int setup_rss(void) -{ - mach_task = mach_task_self(); - return 0; -} - -static int teardown_rss(void) -{ - return 0; -} - -static long get_current_proc_rss(void) -{ - mach_msg_type_number_t out_count = MACH_TASK_BASIC_INFO_COUNT; - mach_task_basic_info_data_t taskinfo = { .resident_size = 0 }; - - kern_return_t error = task_info(mach_task, MACH_TASK_BASIC_INFO, (task_info_t)&taskinfo, &out_count); - if (error == KERN_SUCCESS) { - return (long)(taskinfo.resident_size / 1024); - } else { - return -1; - } -} diff --git a/rpython/rlib/rvmprof/src/shared/rss_unix.h b/rpython/rlib/rvmprof/src/shared/rss_unix.h deleted file mode 100644 --- a/rpython/rlib/rvmprof/src/shared/rss_unix.h +++ /dev/null @@ -1,38 +0,0 @@ -#include - -/* On normal Unices we can get RSS from '/proc//status'. */ -static int proc_file = -1; - -static int setup_rss(void) -{ - char buf[128]; - - sprintf(buf, "/proc/%d/status", getpid()); - proc_file = open(buf, O_RDONLY); - return proc_file; -} - -static int teardown_rss(void) { - close(proc_file); - proc_file = -1; - return 0; -} - -static long get_current_proc_rss(void) -{ - char buf[1024]; - int i = 0; - - if (lseek(proc_file, 0, SEEK_SET) == -1) - return -1; - if (read(proc_file, buf, 1024) == -1) - return -1; - while (i < 1020) { - if (strncmp(buf + i, "VmRSS:\t", 7) == 0) { - i += 7; - return atoi(buf + i); - } - i++; - } - return -1; -} diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main.c b/rpython/rlib/rvmprof/src/shared/vmprof_main.c deleted file mode 100644 --- a/rpython/rlib/rvmprof/src/shared/vmprof_main.c +++ /dev/null @@ -1,30 +0,0 @@ -#ifdef VMPROF_UNIX - -#include -/* value: LSB bit is 1 if signals must be ignored; all other bits - are a counter for how many threads are currently in a signal handler */ -static long volatile signal_handler_value = 1; - -void vmprof_ignore_signals(int ignored) -{ - if (!ignored) { - 
__sync_fetch_and_and(&signal_handler_value, ~1L); - } else { - /* set the last bit, and wait until concurrently-running signal - handlers finish */ - while (__sync_or_and_fetch(&signal_handler_value, 1L) != 1L) { - usleep(1); - } - } -} - -long vmprof_enter_signal(void) -{ - return __sync_fetch_and_add(&signal_handler_value, 2L); -} - -long vmprof_exit_signal(void) -{ - return __sync_sub_and_fetch(&signal_handler_value, 2L); -} -#endif diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main.h b/rpython/rlib/rvmprof/src/shared/vmprof_main.h deleted file mode 100644 --- a/rpython/rlib/rvmprof/src/shared/vmprof_main.h +++ /dev/null @@ -1,549 +0,0 @@ -#pragma once - -/* VMPROF - * - * statistical sampling profiler specifically designed to profile programs - * which run on a Virtual Machine and/or bytecode interpreter, such as Python, - * etc. - * - * The logic to dump the C stack traces is partly stolen from the code in - * gperftools. - * The file "getpc.h" has been entirely copied from gperftools. - * - * Tested only on gcc, linux, x86_64. 
- * - * Copyright (C) 2014-2017 - * Antonio Cuni - anto.cuni at gmail.com - * Maciej Fijalkowski - fijall at gmail.com - * Armin Rigo - arigo at tunes.org - * Richard Plangger - planrichi at gmail.com - * - */ - -#define _GNU_SOURCE 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "vmprof.h" - -#include "vmp_stack.h" -#include "vmprof_getpc.h" -#include "vmprof_mt.h" -#include "vmprof_common.h" -#include "compat.h" - -#if defined(__unix__) -#include "rss_unix.h" -#elif defined(__APPLE__) -#include "rss_darwin.h" -#endif - -#if VMPROF_LINUX -#include -#endif - -/************************************************************/ - -static void *(*mainloop_get_virtual_ip)(char *) = 0; -static int opened_profile(const char *interp_name, int memory, int proflines, int native, int real_time); -static void flush_codes(void); - -/************************************************************/ - -RPY_EXTERN void vmprof_ignore_signals(int ignored); -RPY_EXTERN long vmprof_enter_signal(void); -RPY_EXTERN long vmprof_exit_signal(void); - -/* ************************************************************* - * functions to write a profile file compatible with gperftools - * ************************************************************* - */ - -static char atfork_hook_installed = 0; - - -/* ************************************************************* - * functions to dump the stack trace - * ************************************************************* - */ - -int get_stack_trace(PY_THREAD_STATE_T * current, void** result, int max_depth, intptr_t pc) -{ - PY_STACK_FRAME_T * frame; -#ifdef RPYTHON_VMPROF - // do nothing here, - frame = (PY_STACK_FRAME_T*)current; -#else - if (current == NULL) { - fprintf(stderr, "WARNING: get_stack_trace, current is NULL\n"); - return 0; - } - frame = current->frame; -#endif - if (frame == NULL) { - fprintf(stderr, "WARNING: get_stack_trace, frame is NULL\n"); - return 0; - } - 
return vmp_walk_and_record_stack(frame, result, max_depth, 1, pc); -} - -/* ************************************************************* - * the signal handler - * ************************************************************* - */ - -#include - -volatile int spinlock; -jmp_buf restore_point; - -static void segfault_handler(int arg) -{ - longjmp(restore_point, SIGSEGV); -} - -int _vmprof_sample_stack(struct profbuf_s *p, PY_THREAD_STATE_T * tstate, ucontext_t * uc) -{ - int depth; - struct prof_stacktrace_s *st = (struct prof_stacktrace_s *)p->data; - st->marker = MARKER_STACKTRACE; - st->count = 1; -#ifdef RPYTHON_VMPROF - depth = get_stack_trace(get_vmprof_stack(), st->stack, MAX_STACK_DEPTH-1, (intptr_t)GetPC(uc)); -#else - depth = get_stack_trace(tstate, st->stack, MAX_STACK_DEPTH-1, (intptr_t)NULL); -#endif - if (depth == 0) { - return 0; - } - st->depth = depth; - st->stack[depth++] = tstate; - long rss = get_current_proc_rss(); - if (rss >= 0) - st->stack[depth++] = (void*)rss; - p->data_offset = offsetof(struct prof_stacktrace_s, marker); - p->data_size = (depth * sizeof(void *) + - sizeof(struct prof_stacktrace_s) - - offsetof(struct prof_stacktrace_s, marker)); - return 1; -} - -#ifndef RPYTHON_VMPROF -static PY_THREAD_STATE_T * _get_pystate_for_this_thread(void) { - // see issue 116 on github.com/vmprof/vmprof-python. 
- // PyGILState_GetThisThreadState(); can hang forever - // - PyInterpreterState * istate; - PyThreadState * state; - long mythread_id; - - mythread_id = PyThread_get_thread_ident(); - istate = PyInterpreterState_Head(); - if (istate == NULL) { - fprintf(stderr, "WARNING: interp state head is null (for thread id %ld)\n", mythread_id); - return NULL; - } - // fish fish fish, it will NOT lock the keymutex in pythread - do { - state = PyInterpreterState_ThreadHead(istate); - do { - if (state->thread_id == mythread_id) { - return state; - } - } while ((state = PyThreadState_Next(state)) != NULL); - } while ((istate = PyInterpreterState_Next(istate)) != NULL); - - // uh? not found? - fprintf(stderr, "WARNING: cannot find thread state (for thread id %ld), sample will be thrown away\n", mythread_id); - return NULL; -} -#endif - -#ifdef VMPROF_UNIX -static int broadcast_signal_for_threads(void) -{ - int done = 1; - size_t i = 0; - pthread_t self = pthread_self(); - pthread_t tid; - while (i < thread_count) { - tid = threads[i]; - if (pthread_equal(tid, self)) { - done = 0; - } else if (pthread_kill(tid, SIGALRM)) { - remove_thread(tid, i); - } - i++; - } - return done; -} -#endif - -#ifdef VMPROF_LINUX -static inline int is_main_thread(void) -{ - pid_t pid = getpid(); - pid_t tid = (pid_t) syscall(SYS_gettid); - return (pid == tid); -} -#endif - -#ifdef VMPROF_APPLE -static inline int is_main_thread(void) -{ - return pthread_main_np(); -} -#endif - -static void sigprof_handler(int sig_nr, siginfo_t* info, void *ucontext) -{ - int commit; - PY_THREAD_STATE_T * tstate = NULL; - void (*prevhandler)(int); - -#ifndef RPYTHON_VMPROF - - // Even though the docs say that this function call is for 'esoteric use' - // it seems to be correctly set when the interpreter is teared down! 
- if (!Py_IsInitialized()) { - return; - } - - // TERRIBLE HACK AHEAD - // on OS X, the thread local storage is sometimes uninitialized - // when the signal handler runs - it means it's impossible to read errno - // or call any syscall or read PyThread_Current or pthread_self. Additionally, - // it seems impossible to read the register gs. - // here we register segfault handler (all guarded by a spinlock) and call - // longjmp in case segfault happens while reading a thread local - // - // We do the same error detection for linux to ensure that - // get_current_thread_state returns a sane result - while (__sync_lock_test_and_set(&spinlock, 1)) { - } - -#ifdef VMPROF_UNIX - // SIGNAL ABUSE AHEAD - // On linux, the prof timer will deliver the signal to the thread which triggered the timer, - // because these timers are based on process and system time, and as such, are thread-aware. - // For the real timer, the signal gets delivered to the main thread, seemingly always. - // Consequently if we want to sample multiple threads, we need to forward this signal. 
- if (signal_type == SIGALRM) { - if (is_main_thread() && broadcast_signal_for_threads()) { - __sync_lock_release(&spinlock); - return; - } - } -#endif - - prevhandler = signal(SIGSEGV, &segfault_handler); - int fault_code = setjmp(restore_point); - if (fault_code == 0) { - pthread_self(); - tstate = _get_pystate_for_this_thread(); - } else { - signal(SIGSEGV, prevhandler); - __sync_lock_release(&spinlock); - return; - } - signal(SIGSEGV, prevhandler); - __sync_lock_release(&spinlock); -#endif - - long val = vmprof_enter_signal(); - - if ((val & 1) == 0) { - int saved_errno = errno; - int fd = vmp_profile_fileno(); - assert(fd >= 0); - - struct profbuf_s *p = reserve_buffer(fd); - if (p == NULL) { - /* ignore this signal: there are no free buffers right now */ - } else { -#ifdef RPYTHON_VMPROF - commit = _vmprof_sample_stack(p, NULL, (ucontext_t*)ucontext); -#else - commit = _vmprof_sample_stack(p, tstate, (ucontext_t*)ucontext); -#endif - if (commit) { - commit_buffer(fd, p); - } else { -#ifndef RPYTHON_VMPROF - fprintf(stderr, "WARNING: canceled buffer, no stack trace was written %d\n", is_enabled); -#else - fprintf(stderr, "WARNING: canceled buffer, no stack trace was written\n"); -#endif - cancel_buffer(p); - } - } - - errno = saved_errno; - } - - vmprof_exit_signal(); -} - - - -/* ************************************************************* - * the setup and teardown functions - * ************************************************************* - */ - -static int install_sigprof_handler(void) -{ - struct sigaction sa; - memset(&sa, 0, sizeof(sa)); - sa.sa_sigaction = sigprof_handler; - sa.sa_flags = SA_RESTART | SA_SIGINFO; - if (sigemptyset(&sa.sa_mask) == -1 || - sigaction(signal_type, &sa, NULL) == -1) - return -1; - return 0; -} - -static int remove_sigprof_handler(void) -{ - struct sigaction ign_sigint, prev; - ign_sigint.sa_handler = SIG_IGN; - ign_sigint.sa_flags = 0; - sigemptyset(&ign_sigint.sa_mask); - - if (sigaction(signal_type, &ign_sigint, NULL) < 
0) { - fprintf(stderr, "Could not remove the signal handler (for profiling)\n"); - return -1; - } - return 0; -} - -static int install_sigprof_timer(void) -{ - static struct itimerval timer; - timer.it_interval.tv_sec = 0; - timer.it_interval.tv_usec = (int)profile_interval_usec; - timer.it_value = timer.it_interval; - if (setitimer(itimer_type, &timer, NULL) != 0) - return -1; - return 0; -} - -static int remove_sigprof_timer(void) { - static struct itimerval timer; - timerclear(&(timer.it_interval)); - timerclear(&(timer.it_value)); - if (setitimer(itimer_type, &timer, NULL) != 0) { - fprintf(stderr, "Could not disable the signal handler (for profiling)\n"); - return -1; - } - return 0; -} - -static void atfork_disable_timer(void) { - if (profile_interval_usec > 0) { - remove_sigprof_timer(); -#ifndef RPYTHON_VMPROF - is_enabled = 0; -#endif - } -} - -static void atfork_enable_timer(void) { - if (profile_interval_usec > 0) { - install_sigprof_timer(); -#ifndef RPYTHON_VMPROF - is_enabled = 1; -#endif - } -} - -static void atfork_close_profile_file(void) { - int fd = vmp_profile_fileno(); - if (fd != -1) - close(fd); - vmp_set_profile_fileno(-1); -} - -static int install_pthread_atfork_hooks(void) { - /* this is needed to prevent the problems described there: - - http://code.google.com/p/gperftools/issues/detail?id=278 - - http://lists.debian.org/debian-glibc/2010/03/msg00161.html - - TL;DR: if the RSS of the process is large enough, the clone() syscall - will be interrupted by the SIGPROF before it can complete, then - retried, interrupted again and so on, in an endless loop. The - solution is to disable the timer around the fork, and re-enable it - only inside the parent. 
- */ - if (atfork_hook_installed) - return 0; - int ret = pthread_atfork(atfork_disable_timer, atfork_enable_timer, atfork_close_profile_file); - if (ret != 0) - return -1; - atfork_hook_installed = 1; - return 0; -} - -#ifdef VMP_SUPPORTS_NATIVE_PROFILING -void init_cpyprof(int native) -{ - // skip this if native should not be enabled - if (!native) { - vmp_native_disable(); - return; - } - vmp_native_enable(); -} - -static void disable_cpyprof(void) -{ - vmp_native_disable(); -} -#endif - -RPY_EXTERN -int vmprof_enable(int memory, int native, int real_time) -{ -#ifdef VMP_SUPPORTS_NATIVE_PROFILING - init_cpyprof(native); -#endif - assert(vmp_profile_fileno() >= 0); - assert(prepare_interval_usec > 0); - profile_interval_usec = prepare_interval_usec; - if (memory && setup_rss() == -1) - goto error; -#if VMPROF_UNIX - if (real_time && insert_thread(pthread_self(), -1) == -1) - goto error; -#endif - if (install_pthread_atfork_hooks() == -1) - goto error; - if (install_sigprof_handler() == -1) - goto error; - if (install_sigprof_timer() == -1) - goto error; - vmprof_ignore_signals(0); - return 0; - - error: - vmp_set_profile_fileno(-1); - profile_interval_usec = 0; - return -1; -} - - -int close_profile(void) -{ - int fileno = vmp_profile_fileno(); - fsync(fileno); - (void)vmp_write_time_now(MARKER_TRAILER); - teardown_rss(); - - /* don't close() the file descriptor from here */ - vmp_set_profile_fileno(-1); - return 0; -} - -RPY_EXTERN -int vmprof_disable(void) -{ - vmprof_ignore_signals(1); - profile_interval_usec = 0; -#ifdef VMP_SUPPORTS_NATIVE_PROFILING - disable_cpyprof(); -#endif - - if (remove_sigprof_timer() == -1) { - return -1; - } - if (remove_sigprof_handler() == -1) { - return -1; - } -#ifdef VMPROF_UNIX - if ((signal_type == SIGALRM) && remove_threads() == -1) { - return -1; - } -#endif - flush_codes(); - if (shutdown_concurrent_bufs(vmp_profile_fileno()) < 0) - return -1; - return close_profile(); -} - -RPY_EXTERN -int 
vmprof_register_virtual_function(char *code_name, intptr_t code_uid, - int auto_retry) -{ - long namelen = strnlen(code_name, 1023); - long blocklen = 1 + sizeof(intptr_t) + sizeof(long) + namelen; - struct profbuf_s *p; - char *t; - - retry: - p = current_codes; - if (p != NULL) { - if (__sync_bool_compare_and_swap(¤t_codes, p, NULL)) { - /* grabbed 'current_codes': we will append the current block - to it if it contains enough room */ - size_t freesize = SINGLE_BUF_SIZE - p->data_size; - if (freesize < (size_t)blocklen) { - /* full: flush it */ - commit_buffer(vmp_profile_fileno(), p); - p = NULL; - } - } - else { - /* compare-and-swap failed, don't try again */ - p = NULL; - } - } - - if (p == NULL) { - p = reserve_buffer(vmp_profile_fileno()); - if (p == NULL) { - /* can't get a free block; should almost never be the - case. Spin loop if allowed, or return a failure code - if not (e.g. we're in a signal handler) */ - if (auto_retry > 0) { - auto_retry--; - usleep(1); - goto retry; - } - return -1; - } - } - - t = p->data + p->data_size; - p->data_size += blocklen; - assert(p->data_size <= SINGLE_BUF_SIZE); - *t++ = MARKER_VIRTUAL_IP; - memcpy(t, &code_uid, sizeof(intptr_t)); t += sizeof(intptr_t); - memcpy(t, &namelen, sizeof(long)); t += sizeof(long); - memcpy(t, code_name, namelen); - - /* try to reattach 'p' to 'current_codes' */ - if (!__sync_bool_compare_and_swap(¤t_codes, NULL, p)) { - /* failed, flush it */ - commit_buffer(vmp_profile_fileno(), p); - } - return 0; -} - -static void flush_codes(void) -{ - struct profbuf_s *p = current_codes; - if (p != NULL) { - current_codes = NULL; - commit_buffer(vmp_profile_fileno(), p); - } -} diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main_win32.c b/rpython/rlib/rvmprof/src/shared/vmprof_main_win32.c deleted file mode 100644 --- a/rpython/rlib/rvmprof/src/shared/vmprof_main_win32.c +++ /dev/null @@ -1,42 +0,0 @@ -// cannot include this header because it also has definitions -#include "windows.h" -#include 
"compat.h" -#include "vmp_stack.h" - -HANDLE write_mutex; - -int prepare_concurrent_bufs(void) -{ - if (!(write_mutex = CreateMutex(NULL, FALSE, NULL))) - return -1; - return 0; -} - -#include - -int vmp_write_all(const char *buf, size_t bufsize) -{ - int res; - int fd; - int count; - - res = WaitForSingleObject(write_mutex, INFINITE); - fd = vmp_profile_fileno(); - - if (fd == -1) { - ReleaseMutex(write_mutex); - return -1; - } - while (bufsize > 0) { - count = _write(fd, buf, (long)bufsize); - if (count <= 0) { - ReleaseMutex(write_mutex); - return -1; /* failed */ - } - buf += count; - bufsize -= count; - } - ReleaseMutex(write_mutex); - return 0; -} - diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_main_win32.h b/rpython/rlib/rvmprof/src/shared/vmprof_main_win32.h deleted file mode 100644 --- a/rpython/rlib/rvmprof/src/shared/vmprof_main_win32.h +++ /dev/null @@ -1,203 +0,0 @@ -#pragma once - -#include "windows.h" -#include "compat.h" -#include "vmp_stack.h" - -HANDLE write_mutex; - -int prepare_concurrent_bufs(void); - -#include "vmprof_common.h" -#include - -// This file has been inspired (but not copied from since the LICENSE -// would not allow it) from verysleepy profiler - -volatile int thread_started = 0; -volatile int enabled = 0; - -int vmp_write_all(const char *buf, size_t bufsize); - -#ifdef RPYTHON_VMPROF -typedef struct pypy_threadlocal_s PY_WIN_THREAD_STATE; -#else -typedef PyThreadState PY_WIN_THREAD_STATE; -#endif - - -RPY_EXTERN -int vmprof_register_virtual_function(char *code_name, intptr_t code_uid, - int auto_retry) -{ - char buf[2048]; - long namelen; - - namelen = (long)strnlen(code_name, 1023); - buf[0] = MARKER_VIRTUAL_IP; - *(intptr_t*)(buf + 1) = code_uid; - *(long*)(buf + 1 + sizeof(intptr_t)) = namelen; - memcpy(buf + 1 + sizeof(intptr_t) + sizeof(long), code_name, namelen); - vmp_write_all(buf, 1 + sizeof(intptr_t) + sizeof(long) + namelen); - return 0; -} - -int vmprof_snapshot_thread(DWORD thread_id, PY_WIN_THREAD_STATE 
*tstate, prof_stacktrace_s *stack) -{ - HRESULT result; - HANDLE hThread; - int depth; - CONTEXT ctx; -#ifdef RPYTHON_LL2CTYPES - return 0; // not much we can do -#else -#if !defined(RPY_TLOFS_thread_ident) && defined(RPYTHON_VMPROF) - return 0; // we can't freeze threads, unsafe -#else - hThread = OpenThread(THREAD_ALL_ACCESS, FALSE, thread_id); - if (!hThread) { - return -1; - } - result = SuspendThread(hThread); - if(result == 0xffffffff) - return -1; // possible, e.g. attached debugger or thread alread suspended - // find the correct thread -#ifdef RPYTHON_VMPROF - ctx.ContextFlags = CONTEXT_FULL; - if (!GetThreadContext(hThread, &ctx)) - return -1; - depth = get_stack_trace(tstate->vmprof_tl_stack, - stack->stack, MAX_STACK_DEPTH-2, ctx.Eip); - stack->depth = depth; - stack->stack[depth++] = thread_id; - stack->count = 1; - stack->marker = MARKER_STACKTRACE; - ResumeThread(hThread); - return depth; -#else - depth = vmp_walk_and_record_stack(tstate->frame, stack->stack, - MAX_STACK_DEPTH, 0, 0); - stack->depth = depth; - stack->stack[depth++] = (void*)((ULONG_PTR)thread_id); - stack->count = 1; - stack->marker = MARKER_STACKTRACE; - ResumeThread(hThread); - return depth; -#endif - -#endif -#endif -} - -#ifndef RPYTHON_VMPROF -static -PY_WIN_THREAD_STATE * get_current_thread_state(void) -{ -#if PY_MAJOR_VERSION < 3 - return _PyThreadState_Current; -#elif PY_VERSION_HEX < 0x03050200 - return (PyThreadState*) _Py_atomic_load_relaxed(&_PyThreadState_Current); -#else - return _PyThreadState_UncheckedGet(); -#endif -} -#endif - -long __stdcall vmprof_mainloop(void *arg) -{ -#ifdef RPYTHON_LL2CTYPES - // for tests only - return 0; -#else - // it is not a test case! 
- PY_WIN_THREAD_STATE *tstate; - HANDLE hThreadSnap = INVALID_HANDLE_VALUE; - prof_stacktrace_s *stack = (prof_stacktrace_s*)malloc(SINGLE_BUF_SIZE); - int depth; -#ifndef RPYTHON_VMPROF - // cpython version - while (1) { - Sleep(profile_interval_usec * 1000); - if (!enabled) { - continue; - } - tstate = get_current_thread_state(); - if (!tstate) - continue; - depth = vmprof_snapshot_thread(tstate->thread_id, tstate, stack); - if (depth > 0) { - vmp_write_all((char*)stack + offsetof(prof_stacktrace_s, marker), - SIZEOF_PROF_STACKTRACE + depth * sizeof(void*)); - } - } -#else - // pypy version - while (1) { - //Sleep(profile_interval_usec * 1000); - Sleep(10); - if (!enabled) { - continue; - } - _RPython_ThreadLocals_Acquire(); - tstate = _RPython_ThreadLocals_Head(); // the first one is one behind head - tstate = _RPython_ThreadLocals_Enum(tstate); - while (tstate) { - if (tstate->ready == 42) { - depth = vmprof_snapshot_thread(tstate->thread_ident, tstate, stack); - if (depth > 0) { - vmp_write_all((char*)stack + offsetof(prof_stacktrace_s, marker), - depth * sizeof(void *) + - sizeof(struct prof_stacktrace_s) - - offsetof(struct prof_stacktrace_s, marker)); - } - } - tstate = _RPython_ThreadLocals_Enum(tstate); - } - _RPython_ThreadLocals_Release(); - } -#endif -#endif -} - -RPY_EXTERN -int vmprof_enable(int memory, int native, int real_time) -{ - if (!thread_started) { - if (!CreateThread(NULL, 0, vmprof_mainloop, NULL, 0, NULL)) { - return -1; - } - thread_started = 1; - } - enabled = 1; - return 0; -} - -RPY_EXTERN -int vmprof_disable(void) -{ - char marker = MARKER_TRAILER; - (void)vmp_write_time_now(MARKER_TRAILER); - - enabled = 0; - vmp_set_profile_fileno(-1); - return 0; -} - -RPY_EXTERN -void vmprof_ignore_signals(int ignored) -{ - enabled = !ignored; -} - -int vmp_native_enable(void) { - return 0; -} - -void vmp_native_disable(void) { -} - -int get_stack_trace(PY_WIN_THREAD_STATE * current, void** result, - int max_depth, intptr_t pc) -{ - return 0; -} 
From pypy.commits at gmail.com Tue Jul 25 09:50:05 2017 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 25 Jul 2017 06:50:05 -0700 (PDT) Subject: [pypy-commit] pypy default: remove write_all_code_objects, this method is not called when it does not exist Message-ID: <59774c8d.428bdf0a.e9a7f.5cca@mx.google.com> Author: Richard Plangger Branch: Changeset: r91968:ac3af78f56db Date: 2017-07-23 18:22 -0400 http://bitbucket.org/pypy/pypy/changeset/ac3af78f56db/ Log: remove write_all_code_objects, this method is not called when it does not exist diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py --- a/pypy/module/_vmprof/__init__.py +++ b/pypy/module/_vmprof/__init__.py @@ -11,7 +11,6 @@ interpleveldefs = { 'enable': 'interp_vmprof.enable', 'disable': 'interp_vmprof.disable', - 'write_all_code_objects': 'interp_vmprof.write_all_code_objects', 'is_enabled': 'interp_vmprof.is_enabled', 'get_profile_path': 'interp_vmprof.get_profile_path', 'stop_sampling': 'interp_vmprof.stop_sampling', diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -70,11 +70,6 @@ except rvmprof.VMProfError as e: raise VMProfError(space, e) -def write_all_code_objects(space): - """ Needed on cpython, just empty function here - """ - pass - def disable(space): """Disable vmprof. Remember to close the file descriptor afterwards if necessary. 
From pypy.commits at gmail.com Tue Jul 25 09:50:03 2017 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 25 Jul 2017 06:50:03 -0700 (PDT) Subject: [pypy-commit] pypy default: reapply fix Message-ID: <59774c8b.865d1c0a.437e5.1101@mx.google.com> Author: Richard Plangger Branch: Changeset: r91967:e19ef006ba32 Date: 2017-07-23 16:46 -0400 http://bitbucket.org/pypy/pypy/changeset/e19ef006ba32/ Log: reapply fix diff too long, truncating to 2000 out of 2191 lines diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -115,3 +115,31 @@ assert fd1.read() == tmpfile.read() _vmprof.disable() assert _vmprof.get_profile_path() is None + + def test_stop_sampling(self): + import os + import _vmprof + tmpfile = open(self.tmpfilename, 'wb') + native = 1 + def f(): + import sys + import math + j = sys.maxsize + for i in range(500): + j = math.sqrt(j) + _vmprof.enable(tmpfile.fileno(), 0.01, 0, native, 0, 0) + # get_vmprof_stack() always returns 0 here! + # see vmprof_common.c and assume RPYTHON_LL2CTYPES is defined! 
+ f() + fileno = _vmprof.stop_sampling() + pos = os.lseek(fileno, 0, os.SEEK_CUR) + f() + pos2 = os.lseek(fileno, 0, os.SEEK_CUR) + assert pos == pos2 + _vmprof.start_sampling() + f() + fileno = _vmprof.stop_sampling() + pos3 = os.lseek(fileno, 0, os.SEEK_CUR) + assert pos3 > pos + _vmprof.disable() + diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -20,7 +20,8 @@ compile_extra = ['-DRPYTHON_VMPROF', '-O3'] separate_module_files = [ - SHARED.join('symboltable.c') + SHARED.join('symboltable.c'), + SHARED.join('vmprof_unix.c') ] if sys.platform.startswith('linux'): separate_module_files += [ @@ -40,7 +41,7 @@ compile_extra += ['-DVMPROF_LINUX'] elif sys.platform == 'win32': compile_extra = ['-DRPYTHON_VMPROF', '-DVMPROF_WINDOWS'] - separate_module_files = [SHARED.join('vmprof_main_win32.c')] + separate_module_files = [SHARED.join('vmprof_win.c')] _libs = [] else: # Guessing a BSD-like Unix platform @@ -58,7 +59,9 @@ SHARED.join('compat.c'), SHARED.join('machine.c'), SHARED.join('vmp_stack.c'), - SHARED.join('vmprof_main.c'), + SHARED.join('vmprof_mt.c'), + SHARED.join('vmprof_memory.c'), + SHARED.join('vmprof_common.c'), # symbol table already in separate_module_files ] + separate_module_files, post_include_bits=[], diff --git a/rpython/rlib/rvmprof/src/rvmprof.c b/rpython/rlib/rvmprof/src/rvmprof.c --- a/rpython/rlib/rvmprof/src/rvmprof.c +++ b/rpython/rlib/rvmprof/src/rvmprof.c @@ -15,9 +15,9 @@ #include "shared/vmprof_get_custom_offset.h" #ifdef VMPROF_UNIX -#include "shared/vmprof_main.h" +#include "shared/vmprof_unix.h" #else -#include "shared/vmprof_main_win32.h" +#include "shared/vmprof_win.h" #endif diff --git a/rpython/rlib/rvmprof/src/shared/_vmprof.c b/rpython/rlib/rvmprof/src/shared/_vmprof.c --- a/rpython/rlib/rvmprof/src/shared/_vmprof.c +++ b/rpython/rlib/rvmprof/src/shared/_vmprof.c @@ -9,8 +9,8 @@ #include #include "_vmprof.h" +#include 
"vmprof_common.h" -static volatile int is_enabled = 0; static destructor Original_code_dealloc = 0; static PyObject* (*_default_eval_loop)(PyFrameObject *, int) = 0; @@ -18,9 +18,9 @@ #include "trampoline.h" #include "machine.h" #include "symboltable.h" -#include "vmprof_main.h" +#include "vmprof_unix.h" #else -#include "vmprof_main_win32.h" +#include "vmprof_win.h" #endif #include "vmp_stack.h" @@ -156,7 +156,7 @@ static void cpyprof_code_dealloc(PyObject *co) { - if (is_enabled) { + if (vmprof_is_enabled()) { emit_code_object((PyCodeObject *)co); /* xxx error return values are ignored */ } @@ -187,7 +187,7 @@ return NULL; } - if (is_enabled) { + if (vmprof_is_enabled()) { PyErr_SetString(PyExc_ValueError, "vmprof is already enabled"); return NULL; } @@ -217,13 +217,13 @@ return NULL; } - is_enabled = 1; + vmprof_set_enabled(1); Py_RETURN_NONE; } static PyObject * vmp_is_enabled(PyObject *module, PyObject *noargs) { - if (is_enabled) { + if (vmprof_is_enabled()) { Py_RETURN_TRUE; } Py_RETURN_FALSE; @@ -237,7 +237,7 @@ return NULL; } - is_enabled = 0; + vmprof_set_enabled(0); if (PyErr_Occurred()) return NULL; @@ -362,7 +362,7 @@ #ifdef VMPROF_UNIX static PyObject * vmp_get_profile_path(PyObject *module, PyObject *noargs) { PyObject * o; - if (is_enabled) { + if (vmprof_is_enabled()) { char buffer[4096]; buffer[0] = 0; ssize_t buffer_len = vmp_fd_to_path(vmp_profile_fileno(), buffer, 4096); @@ -382,21 +382,19 @@ insert_real_time_thread(PyObject *module, PyObject * noargs) { ssize_t thread_count; - if (!is_enabled) { + if (!vmprof_is_enabled()) { PyErr_SetString(PyExc_ValueError, "vmprof is not enabled"); return NULL; } - if (signal_type != SIGALRM) { + if (vmprof_get_signal_type() != SIGALRM) { PyErr_SetString(PyExc_ValueError, "vmprof is not in real time mode"); return NULL; } - while (__sync_lock_test_and_set(&spinlock, 1)) { - } - + vmprof_aquire_lock(); thread_count = insert_thread(pthread_self(), -1); - __sync_lock_release(&spinlock); + vmprof_release_lock(); 
return PyLong_FromSsize_t(thread_count); } @@ -405,21 +403,19 @@ remove_real_time_thread(PyObject *module, PyObject * noargs) { ssize_t thread_count; - if (!is_enabled) { + if (!vmprof_is_enabled()) { PyErr_SetString(PyExc_ValueError, "vmprof is not enabled"); return NULL; } - if (signal_type != SIGALRM) { + if (vmprof_get_signal_type() != SIGALRM) { PyErr_SetString(PyExc_ValueError, "vmprof is not in real time mode"); return NULL; } - while (__sync_lock_test_and_set(&spinlock, 1)) { - } - + vmprof_aquire_lock(); thread_count = remove_thread(pthread_self(), -1); - __sync_lock_release(&spinlock); + vmprof_release_lock(); return PyLong_FromSsize_t(thread_count); } diff --git a/rpython/rlib/rvmprof/src/shared/machine.c b/rpython/rlib/rvmprof/src/shared/machine.c --- a/rpython/rlib/rvmprof/src/shared/machine.c +++ b/rpython/rlib/rvmprof/src/shared/machine.c @@ -27,6 +27,8 @@ #endif #elif __linux__ return "linux"; +#elif __FreeBSD__ + return "freebsd" #else #error "Unknown compiler" #endif @@ -38,7 +40,7 @@ char proffs[24]; (void)snprintf(proffs, 24, "/proc/self/fd/%d", fd); return readlink(proffs, buffer, buffer_len); -#elif defined(VMPROF_UNIX) +#elif defined(VMPROF_UNIX) && !defined(__FreeBSD__) fcntl(fd, F_GETPATH, buffer); return strlen(buffer); #endif diff --git a/rpython/rlib/rvmprof/src/shared/vmp_stack.c b/rpython/rlib/rvmprof/src/shared/vmp_stack.c --- a/rpython/rlib/rvmprof/src/shared/vmp_stack.c +++ b/rpython/rlib/rvmprof/src/shared/vmp_stack.c @@ -523,7 +523,7 @@ int vmp_native_enable(void) { #ifdef VMPROF_LINUX - if (!unw_get_reg) { + if (libhandle == NULL) { if ((libhandle = dlopen(LIBUNWIND, RTLD_LAZY | RTLD_LOCAL)) == NULL) { goto bail_out; } @@ -570,6 +570,7 @@ vmprof_error = dlerror(); fprintf(stderr, "could not close libunwind at runtime. 
error: %s\n", vmprof_error); } + libhandle = NULL; } vmp_native_traces_enabled = 0; diff --git a/rpython/rlib/rvmprof/src/shared/vmprof.h b/rpython/rlib/rvmprof/src/shared/vmprof.h --- a/rpython/rlib/rvmprof/src/shared/vmprof.h +++ b/rpython/rlib/rvmprof/src/shared/vmprof.h @@ -1,5 +1,11 @@ #pragma once +#define _GNU_SOURCE 1 + +#ifndef RPYTHON_VMPROF +#include +#endif + #ifdef VMPROF_UNIX #include #endif @@ -79,3 +85,7 @@ #endif +void set_current_codes(void * to); +int opened_profile(const char *interp_name, int memory, int proflines, int native, int real_time); +void flush_codes(void); + diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_common.c b/rpython/rlib/rvmprof/src/shared/vmprof_common.c new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_common.c @@ -0,0 +1,303 @@ +#include "vmprof_common.h" + +#include +#include + +#ifdef RPYTHON_VMPROF +#ifdef RPYTHON_LL2CTYPES + /* only for testing: ll2ctypes sets RPY_EXTERN from the command-line */ + +#else +# include "common_header.h" +# include "structdef.h" +# include "src/threadlocal.h" +# include "rvmprof.h" +# include "forwarddecl.h" +#endif +#endif + +#ifdef VMP_SUPPORTS_NATIVE_PROFILING +#include "vmp_stack.h" // reduces warings +#endif + + +static volatile int is_enabled = 0; +static long prepare_interval_usec = 0; +static long profile_interval_usec = 0; + +#ifdef VMPROF_UNIX +static int signal_type = SIGPROF; +static int itimer_type = ITIMER_PROF; +static pthread_t *threads = NULL; +static size_t threads_size = 0; +static size_t thread_count = 0; +static size_t threads_size_step = 8; +#endif + +int vmprof_get_itimer_type(void) { + return itimer_type; +} + +int vmprof_is_enabled(void) { + return is_enabled; +} + +void vmprof_set_enabled(int value) { + is_enabled = value; +} + +long vmprof_get_prepare_interval_usec(void) { + return prepare_interval_usec; +} + +long vmprof_get_profile_interval_usec(void) { + return profile_interval_usec; +} + +void 
vmprof_set_prepare_interval_usec(long value) { + prepare_interval_usec = value; +} + +void vmprof_set_profile_interval_usec(long value) { + profile_interval_usec = value; +} + +int vmprof_get_signal_type(void) { + return signal_type; +} + +char *vmprof_init(int fd, double interval, int memory, + int proflines, const char *interp_name, int native, int real_time) +{ + if (!(interval >= 1e-6 && interval < 1.0)) { /* also if it is NaN */ + return "bad value for 'interval'"; + } + prepare_interval_usec = (int)(interval * 1000000.0); + + if (prepare_concurrent_bufs() < 0) + return "out of memory"; +#if VMPROF_UNIX + if (real_time) { + signal_type = SIGALRM; + itimer_type = ITIMER_REAL; + } else { + signal_type = SIGPROF; + itimer_type = ITIMER_PROF; + } + set_current_codes(NULL); + assert(fd >= 0); +#else + if (memory) { + return "memory tracking only supported on unix"; + } + if (native) { + return "native profiling only supported on unix"; + } +#endif + vmp_set_profile_fileno(fd); + if (opened_profile(interp_name, memory, proflines, native, real_time) < 0) { + vmp_set_profile_fileno(0); + return strerror(errno); + } + return NULL; +} + +int opened_profile(const char *interp_name, int memory, int proflines, int native, int real_time) +{ + int success; + int bits; + struct { + long hdr[5]; + char interp_name[259]; + } header; + + const char * machine; + size_t namelen = strnlen(interp_name, 255); + + machine = vmp_machine_os_name(); + + header.hdr[0] = 0; + header.hdr[1] = 3; + header.hdr[2] = 0; + header.hdr[3] = prepare_interval_usec; + if (strstr(machine, "win64") != 0) { + header.hdr[4] = 1; + } else { + header.hdr[4] = 0; + } + header.interp_name[0] = MARKER_HEADER; + header.interp_name[1] = '\x00'; + header.interp_name[2] = VERSION_TIMESTAMP; + header.interp_name[3] = memory*PROFILE_MEMORY + proflines*PROFILE_LINES + \ + native*PROFILE_NATIVE + real_time*PROFILE_REAL_TIME; +#ifdef RPYTHON_VMPROF + header.interp_name[3] += PROFILE_RPYTHON; +#endif + 
header.interp_name[4] = (char)namelen; + + memcpy(&header.interp_name[5], interp_name, namelen); + success = vmp_write_all((char*)&header, 5 * sizeof(long) + 5 + namelen); + if (success < 0) { + return success; + } + + /* Write the time and the zone to the log file, profiling will start now */ + (void)vmp_write_time_now(MARKER_TIME_N_ZONE); + + /* write some more meta information */ + vmp_write_meta("os", machine); + bits = vmp_machine_bits(); + if (bits == 64) { + vmp_write_meta("bits", "64"); + } else if (bits == 32) { + vmp_write_meta("bits", "32"); + } + + return success; +} + + +/* Seems that CPython 3.5.1 made our job harder. Did not find out how + to do that without these hacks. We can't use PyThreadState_GET(), + because that calls PyThreadState_Get() which fails an assert if the + result is NULL. */ +#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) + /* this was abruptly un-defined in 3.5.1 */ +void *volatile _PyThreadState_Current; + /* XXX simple volatile access is assumed atomic */ +# define _Py_atomic_load_relaxed(pp) (*(pp)) +#endif + +#ifdef RPYTHON_VMPROF +#ifndef RPYTHON_LL2CTYPES +PY_STACK_FRAME_T *get_vmprof_stack(void) +{ + struct pypy_threadlocal_s *tl; + _OP_THREADLOCALREF_ADDR_SIGHANDLER(tl); + if (tl == NULL) { + return NULL; + } else { + return tl->vmprof_tl_stack; + } +} +#else +PY_STACK_FRAME_T *get_vmprof_stack(void) +{ + return 0; +} +#endif + +intptr_t vmprof_get_traceback(void *stack, void *ucontext, + intptr_t *result_p, intptr_t result_length) +{ + int n; + int enabled; +#ifdef VMPROF_WINDOWS + intptr_t pc = 0; /* XXX implement me */ +#else + intptr_t pc = ucontext ? 
(intptr_t)GetPC((ucontext_t *)ucontext) : 0; +#endif + if (stack == NULL) { + stack = get_vmprof_stack(); + } +#ifdef VMP_SUPPORTS_NATIVE_PROFILING + enabled = vmp_native_enabled(); + vmp_native_disable(); +#endif + n = get_stack_trace(stack, result_p, result_length - 2, pc); +#ifdef VMP_SUPPORTS_NATIVE_PROFILING + if (enabled) { + vmp_native_enable(); + } +#endif + return (intptr_t)n; +} +#endif + +#ifdef VMPROF_UNIX + +ssize_t search_thread(pthread_t tid, ssize_t i) +{ + if (i < 0) + i = 0; + while ((size_t)i < thread_count) { + if (pthread_equal(threads[i], tid)) + return i; + i++; + } + return -1; +} + +ssize_t insert_thread(pthread_t tid, ssize_t i) +{ + assert(signal_type == SIGALRM); + i = search_thread(tid, i); + if (i > 0) + return -1; + if (thread_count == threads_size) { + threads_size += threads_size_step; + threads = realloc(threads, sizeof(pid_t) * threads_size); + assert(threads != NULL); + memset(threads + thread_count, 0, sizeof(pid_t) * threads_size_step); + } + threads[thread_count++] = tid; + return thread_count; +} + +ssize_t remove_thread(pthread_t tid, ssize_t i) +{ + assert(signal_type == SIGALRM); + if (thread_count == 0) + return -1; + if (threads == NULL) + return -1; + i = search_thread(tid, i); + if (i < 0) + return -1; + threads[i] = threads[--thread_count]; + threads[thread_count] = 0; + return thread_count; +} + +ssize_t remove_threads(void) +{ + assert(signal_type == SIGALRM); + if (threads != NULL) { + free(threads); + threads = NULL; + } + thread_count = 0; + threads_size = 0; + return 0; +} + +int broadcast_signal_for_threads(void) +{ + int done = 1; + size_t i = 0; + pthread_t self = pthread_self(); + pthread_t tid; + while (i < thread_count) { + tid = threads[i]; + if (pthread_equal(tid, self)) { + done = 0; + } else if (pthread_kill(tid, SIGALRM)) { + remove_thread(tid, i); + } + i++; + } + return done; +} + +int is_main_thread(void) +{ +#ifdef VMPROF_LINUX + pid_t pid = getpid(); + pid_t tid = (pid_t) syscall(SYS_gettid); + 
return (pid == tid); +#elif defined(VMPROF_APPLE) + return pthread_main_np(); +#endif +} + +#endif diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_common.h b/rpython/rlib/rvmprof/src/shared/vmprof_common.h --- a/rpython/rlib/rvmprof/src/shared/vmprof_common.h +++ b/rpython/rlib/rvmprof/src/shared/vmprof_common.h @@ -8,84 +8,27 @@ #include #include -#ifndef VMPROF_WINDOWS +#ifdef VMPROF_UNIX #include #include "vmprof_mt.h" +#include +#include #endif +#include "vmprof_getpc.h" + #ifdef VMPROF_LINUX #include #endif #define MAX_FUNC_NAME 1024 -static long prepare_interval_usec = 0; -static long profile_interval_usec = 0; - -static int opened_profile(const char *interp_name, int memory, int proflines, int native, int real_time); - -#ifdef VMPROF_UNIX -static int signal_type = SIGPROF; -static int itimer_type = ITIMER_PROF; -static pthread_t *threads = NULL; -static size_t threads_size = 0; -static size_t thread_count = 0; -static size_t threads_size_step = 8; -static struct profbuf_s *volatile current_codes; -#endif - #ifdef VMPROF_UNIX -static inline ssize_t search_thread(pthread_t tid, ssize_t i) { - if (i < 0) - i = 0; - while ((size_t)i < thread_count) { - if (pthread_equal(threads[i], tid)) - return i; - i++; - } - return -1; -} - -ssize_t insert_thread(pthread_t tid, ssize_t i) { - assert(signal_type == SIGALRM); - i = search_thread(tid, i); - if (i > 0) - return -1; - if (thread_count == threads_size) { - threads_size += threads_size_step; - threads = realloc(threads, sizeof(pid_t) * threads_size); - assert(threads != NULL); - memset(threads + thread_count, 0, sizeof(pid_t) * threads_size_step); - } - threads[thread_count++] = tid; - return thread_count; -} - -ssize_t remove_thread(pthread_t tid, ssize_t i) { - assert(signal_type == SIGALRM); - if (thread_count == 0) - return -1; - if (threads == NULL) - return -1; - i = search_thread(tid, i); - if (i < 0) - return -1; - threads[i] = threads[--thread_count]; - threads[thread_count] = 0; - return thread_count; 
-} - -ssize_t remove_threads(void) { - assert(signal_type == SIGALRM); - if (threads != NULL) { - free(threads); - threads = NULL; - } - thread_count = 0; - threads_size = 0; - return 0; -} +ssize_t search_thread(pthread_t tid, ssize_t i); +ssize_t insert_thread(pthread_t tid, ssize_t i); +ssize_t remove_thread(pthread_t tid, ssize_t i); +ssize_t remove_threads(void); #endif @@ -130,95 +73,9 @@ RPY_EXTERN char *vmprof_init(int fd, double interval, int memory, - int proflines, const char *interp_name, int native, int real_time) -{ - if (!(interval >= 1e-6 && interval < 1.0)) { /* also if it is NaN */ - return "bad value for 'interval'"; - } - prepare_interval_usec = (int)(interval * 1000000.0); + int proflines, const char *interp_name, int native, int real_time); - if (prepare_concurrent_bufs() < 0) - return "out of memory"; -#if VMPROF_UNIX - if (real_time) { - signal_type = SIGALRM; - itimer_type = ITIMER_REAL; - } else { - signal_type = SIGPROF; - itimer_type = ITIMER_PROF; - } - current_codes = NULL; - assert(fd >= 0); -#else - if (memory) { - return "memory tracking only supported on unix"; - } - if (native) { - return "native profiling only supported on unix"; - } -#endif - vmp_set_profile_fileno(fd); - if (opened_profile(interp_name, memory, proflines, native, real_time) < 0) { - vmp_set_profile_fileno(0); - return strerror(errno); - } - return NULL; -} - -static int opened_profile(const char *interp_name, int memory, int proflines, int native, int real_time) -{ - int success; - int bits; - struct { - long hdr[5]; - char interp_name[259]; - } header; - - const char * machine; - size_t namelen = strnlen(interp_name, 255); - - machine = vmp_machine_os_name(); - - header.hdr[0] = 0; - header.hdr[1] = 3; - header.hdr[2] = 0; - header.hdr[3] = prepare_interval_usec; - if (strstr(machine, "win64") != 0) { - header.hdr[4] = 1; - } else { - header.hdr[4] = 0; - } - header.interp_name[0] = MARKER_HEADER; - header.interp_name[1] = '\x00'; - header.interp_name[2] = 
VERSION_TIMESTAMP; - header.interp_name[3] = memory*PROFILE_MEMORY + proflines*PROFILE_LINES + \ - native*PROFILE_NATIVE + real_time*PROFILE_REAL_TIME; -#ifdef RPYTHON_VMPROF - header.interp_name[3] += PROFILE_RPYTHON; -#endif - header.interp_name[4] = (char)namelen; - - memcpy(&header.interp_name[5], interp_name, namelen); - success = vmp_write_all((char*)&header, 5 * sizeof(long) + 5 + namelen); - if (success < 0) { - return success; - } - - /* Write the time and the zone to the log file, profiling will start now */ - (void)vmp_write_time_now(MARKER_TIME_N_ZONE); - - /* write some more meta information */ - vmp_write_meta("os", machine); - bits = vmp_machine_bits(); - if (bits == 64) { - vmp_write_meta("bits", "64"); - } else if (bits == 32) { - vmp_write_meta("bits", "32"); - } - - return success; -} - +int opened_profile(const char *interp_name, int memory, int proflines, int native, int real_time); /* Seems that CPython 3.5.1 made our job harder. Did not find out how to do that without these hacks. We can't use PyThreadState_GET(), @@ -233,46 +90,22 @@ #ifdef RPYTHON_VMPROF #ifndef RPYTHON_LL2CTYPES -static PY_STACK_FRAME_T *get_vmprof_stack(void) -{ - struct pypy_threadlocal_s *tl; - _OP_THREADLOCALREF_ADDR_SIGHANDLER(tl); - if (tl == NULL) - return NULL; - else - return tl->vmprof_tl_stack; -} -#else -static PY_STACK_FRAME_T *get_vmprof_stack(void) -{ - return 0; -} +PY_STACK_FRAME_T *get_vmprof_stack(void); +#endif +RPY_EXTERN +intptr_t vmprof_get_traceback(void *stack, void *ucontext, + intptr_t *result_p, intptr_t result_length); #endif -RPY_EXTERN -intptr_t vmprof_get_traceback(void *stack, void *ucontext, - intptr_t *result_p, intptr_t result_length) -{ - int n; - int enabled; -#ifdef VMPROF_WINDOWS - intptr_t pc = 0; /* XXX implement me */ -#else - intptr_t pc = ucontext ? 
(intptr_t)GetPC((ucontext_t *)ucontext) : 0; +int vmprof_get_signal_type(void); +long vmprof_get_prepare_interval_usec(void); +long vmprof_get_profile_interval_usec(void); +void vmprof_set_prepare_interval_usec(long value); +void vmprof_set_profile_interval_usec(long value); +int vmprof_is_enabled(void); +void vmprof_set_enabled(int value); +int vmprof_get_itimer_type(void); +#ifdef VMPROF_UNIX +int broadcast_signal_for_threads(void); +int is_main_thread(void); #endif - if (stack == NULL) { - stack = get_vmprof_stack(); - } -#ifdef VMP_SUPPORTS_NATIVE_PROFILING - enabled = vmp_native_enabled(); - vmp_native_disable(); -#endif - n = get_stack_trace(stack, result_p, result_length - 2, pc); -#ifdef VMP_SUPPORTS_NATIVE_PROFILING - if (enabled) { - vmp_native_enable(); - } -#endif - return (intptr_t)n; -} -#endif diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_getpc.h b/rpython/rlib/rvmprof/src/shared/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/shared/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/shared/vmprof_getpc.h @@ -142,6 +142,7 @@ // the right value for your system, and add it to the list in // vmrpof_config.h #else + static intptr_t GetPC(ucontext_t *signal_ucontext) { return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h } diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_memory.c b/rpython/rlib/rvmprof/src/shared/vmprof_memory.c new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_memory.c @@ -0,0 +1,81 @@ +#include "vmprof_memory.h" + +#ifdef VMPROF_APPLE +/* On OS X we can get RSS using the Mach API. */ +#include +#include +#include +#include + +static mach_port_t mach_task; +#else +#include +#include +#include +#include +#include +#include +#include +/* On '''normal''' Unices we can get RSS from '/proc//status'. 
*/ +static int proc_file = -1; +#endif + +int setup_rss(void) +{ +#ifdef VMPROF_LINUX + char buf[128]; + + sprintf(buf, "/proc/%d/status", getpid()); + proc_file = open(buf, O_RDONLY); + return proc_file; +#elif defined(VMPROF_APPLE) + mach_task = mach_task_self(); + return 0; +#else + return 0; +#endif +} + +int teardown_rss(void) +{ +#ifdef VMPROF_LINUX + close(proc_file); + proc_file = -1; + return 0; +#else + return 0; +#endif +} + +long get_current_proc_rss(void) +{ +#ifdef VMPROF_LINUX + char buf[1024]; + int i = 0; + + if (lseek(proc_file, 0, SEEK_SET) == -1) + return -1; + if (read(proc_file, buf, 1024) == -1) + return -1; + while (i < 1020) { + if (strncmp(buf + i, "VmRSS:\t", 7) == 0) { + i += 7; + return atoi(buf + i); + } + i++; + } + return -1; +#elif defined(VMPROF_APPLE) + mach_msg_type_number_t out_count = MACH_TASK_BASIC_INFO_COUNT; + mach_task_basic_info_data_t taskinfo = { .resident_size = 0 }; + + kern_return_t error = task_info(mach_task, MACH_TASK_BASIC_INFO, (task_info_t)&taskinfo, &out_count); + if (error == KERN_SUCCESS) { + return (long)(taskinfo.resident_size / 1024); + } else { + return -1; + } +#else + return -1; // not implemented +#endif +} diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_memory.h b/rpython/rlib/rvmprof/src/shared/vmprof_memory.h new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_memory.h @@ -0,0 +1,5 @@ +#pragma once + +int setup_rss(void); +int teardown_rss(void); +long get_current_proc_rss(void); diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_mt.c b/rpython/rlib/rvmprof/src/shared/vmprof_mt.c new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_mt.c @@ -0,0 +1,181 @@ +#include "vmprof_mt.h" +/* Support for multithreaded write() operations (implementation) */ + +#include + +#if defined(__i386__) || defined(__amd64__) + static inline void write_fence(void) { asm("" : : : "memory"); } +#else + static inline void write_fence(void) { __sync_synchronize(); } 
+#endif + +static char volatile profbuf_state[MAX_NUM_BUFFERS]; +static struct profbuf_s *profbuf_all_buffers = NULL; +static int volatile profbuf_write_lock = 2; +static long profbuf_pending_write; + + +static void unprepare_concurrent_bufs(void) +{ + if (profbuf_all_buffers != NULL) { + munmap(profbuf_all_buffers, sizeof(struct profbuf_s) * MAX_NUM_BUFFERS); + profbuf_all_buffers = NULL; + } +} + +int prepare_concurrent_bufs(void) +{ + assert(sizeof(struct profbuf_s) == 8192); + + unprepare_concurrent_bufs(); + profbuf_all_buffers = mmap(NULL, sizeof(struct profbuf_s) * MAX_NUM_BUFFERS, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, + -1, 0); + if (profbuf_all_buffers == MAP_FAILED) { + profbuf_all_buffers = NULL; + return -1; + } + memset((char *)profbuf_state, PROFBUF_UNUSED, sizeof(profbuf_state)); + profbuf_write_lock = 0; + profbuf_pending_write = -1; + return 0; +} + +static int _write_single_ready_buffer(int fd, long i) +{ + /* Try to write to disk the buffer number 'i'. This function must + only be called while we hold the write lock. */ + assert(profbuf_write_lock != 0); + + if (profbuf_pending_write >= 0) { + /* A partially written buffer is waiting. We'll write the + rest of this buffer now, instead of 'i'. 
*/ + i = profbuf_pending_write; + assert(profbuf_state[i] == PROFBUF_READY); + } + + if (profbuf_state[i] != PROFBUF_READY) { + /* this used to be a race condition: the buffer was written by a + different thread already, nothing to do now */ + return 0; + } + + int err; + struct profbuf_s *p = &profbuf_all_buffers[i]; + ssize_t count = write(fd, p->data + p->data_offset, p->data_size); + if (count == p->data_size) { + profbuf_state[i] = PROFBUF_UNUSED; + profbuf_pending_write = -1; + } + else { + if (count > 0) { + p->data_offset += count; + p->data_size -= count; + } + profbuf_pending_write = i; + if (count < 0) + return -1; + } + return 0; +} + +static void _write_ready_buffers(int fd) +{ + long i; + int has_write_lock = 0; + + for (i = 0; i < MAX_NUM_BUFFERS; i++) { + if (profbuf_state[i] == PROFBUF_READY) { + if (!has_write_lock) { + if (!__sync_bool_compare_and_swap(&profbuf_write_lock, 0, 1)) + return; /* can't acquire the write lock, give up */ + has_write_lock = 1; + } + if (_write_single_ready_buffer(fd, i) < 0) + break; + } + } + if (has_write_lock) + profbuf_write_lock = 0; +} + +struct profbuf_s *reserve_buffer(int fd) +{ + /* Tries to enter a region of code that fills one buffer. If + successful, returns the profbuf_s. It fails only if the + concurrent buffers are all busy (extreme multithreaded usage). + + This might call write() to emit the data sitting in + previously-prepared buffers. In case of write() error, the + error is ignored but unwritten data stays in the buffers. + */ + long i; + + _write_ready_buffers(fd); + + for (i = 0; i < MAX_NUM_BUFFERS; i++) { + if (profbuf_state[i] == PROFBUF_UNUSED && + __sync_bool_compare_and_swap(&profbuf_state[i], PROFBUF_UNUSED, + PROFBUF_FILLING)) { + struct profbuf_s *p = &profbuf_all_buffers[i]; + p->data_size = 0; + p->data_offset = 0; + return p; + } + } + /* no unused buffer found */ + return NULL; +} + +void commit_buffer(int fd, struct profbuf_s *buf) +{ + /* Leaves a region of code that filled 'buf'. 
+ + This might call write() to emit the data now ready. In case of + write() error, the error is ignored but unwritten data stays in + the buffers. + */ + + /* Make sure every thread sees the full content of 'buf' */ + write_fence(); + + /* Then set the 'ready' flag */ + long i = buf - profbuf_all_buffers; + assert(profbuf_state[i] == PROFBUF_FILLING); + profbuf_state[i] = PROFBUF_READY; + + if (!__sync_bool_compare_and_swap(&profbuf_write_lock, 0, 1)) { + /* can't acquire the write lock, ignore */ + } + else { + _write_single_ready_buffer(fd, i); + profbuf_write_lock = 0; + } +} + +void cancel_buffer(struct profbuf_s *buf) +{ + long i = buf - profbuf_all_buffers; + assert(profbuf_state[i] == PROFBUF_FILLING); + profbuf_state[i] = PROFBUF_UNUSED; +} + +int shutdown_concurrent_bufs(int fd) +{ + /* no signal handler can be running concurrently here, because we + already did vmprof_ignore_signals(1) */ + assert(profbuf_write_lock == 0); + profbuf_write_lock = 2; + + /* last attempt to flush buffers */ + int i; + for (i = 0; i < MAX_NUM_BUFFERS; i++) { + while (profbuf_state[i] == PROFBUF_READY) { + if (_write_single_ready_buffer(fd, i) < 0) + return -1; + } + } + unprepare_concurrent_bufs(); + return 0; +} diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_mt.h b/rpython/rlib/rvmprof/src/shared/vmprof_mt.h --- a/rpython/rlib/rvmprof/src/shared/vmprof_mt.h +++ b/rpython/rlib/rvmprof/src/shared/vmprof_mt.h @@ -1,11 +1,11 @@ #pragma once /* Support for multithreaded write() operations */ +#include "vmprof.h" + #include #include -#include "vmprof.h" - /* The idea is that we have MAX_NUM_BUFFERS available, all of size SINGLE_BUF_SIZE. 
Threads and signal handlers can ask to reserve a buffer, fill it, and finally "commit" it, at which point its @@ -29,12 +29,6 @@ */ #define MAX_NUM_BUFFERS 20 -#if defined(__i386__) || defined(__amd64__) - static inline void write_fence(void) { asm("" : : : "memory"); } -#else - static inline void write_fence(void) { __sync_synchronize(); } -#endif - #ifndef MAP_ANONYMOUS #define MAP_ANONYMOUS MAP_ANON #endif @@ -50,173 +44,8 @@ char data[SINGLE_BUF_SIZE]; }; -static char volatile profbuf_state[MAX_NUM_BUFFERS]; -static struct profbuf_s *profbuf_all_buffers = NULL; -static int volatile profbuf_write_lock = 2; -static long profbuf_pending_write; - - -static void unprepare_concurrent_bufs(void) -{ - if (profbuf_all_buffers != NULL) { - munmap(profbuf_all_buffers, sizeof(struct profbuf_s) * MAX_NUM_BUFFERS); - profbuf_all_buffers = NULL; - } -} - -static int prepare_concurrent_bufs(void) -{ - assert(sizeof(struct profbuf_s) == 8192); - - unprepare_concurrent_bufs(); - profbuf_all_buffers = mmap(NULL, sizeof(struct profbuf_s) * MAX_NUM_BUFFERS, - PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, - -1, 0); - if (profbuf_all_buffers == MAP_FAILED) { - profbuf_all_buffers = NULL; - return -1; - } - memset((char *)profbuf_state, PROFBUF_UNUSED, sizeof(profbuf_state)); - profbuf_write_lock = 0; - profbuf_pending_write = -1; - return 0; -} - -static int _write_single_ready_buffer(int fd, long i) -{ - /* Try to write to disk the buffer number 'i'. This function must - only be called while we hold the write lock. */ - assert(profbuf_write_lock != 0); - - if (profbuf_pending_write >= 0) { - /* A partially written buffer is waiting. We'll write the - rest of this buffer now, instead of 'i'. 
*/ - i = profbuf_pending_write; - assert(profbuf_state[i] == PROFBUF_READY); - } - - if (profbuf_state[i] != PROFBUF_READY) { - /* this used to be a race condition: the buffer was written by a - different thread already, nothing to do now */ - return 0; - } - - int err; - struct profbuf_s *p = &profbuf_all_buffers[i]; - ssize_t count = write(fd, p->data + p->data_offset, p->data_size); - if (count == p->data_size) { - profbuf_state[i] = PROFBUF_UNUSED; - profbuf_pending_write = -1; - } - else { - if (count > 0) { - p->data_offset += count; - p->data_size -= count; - } - profbuf_pending_write = i; - if (count < 0) - return -1; - } - return 0; -} - -static void _write_ready_buffers(int fd) -{ - long i; - int has_write_lock = 0; - - for (i = 0; i < MAX_NUM_BUFFERS; i++) { - if (profbuf_state[i] == PROFBUF_READY) { - if (!has_write_lock) { - if (!__sync_bool_compare_and_swap(&profbuf_write_lock, 0, 1)) - return; /* can't acquire the write lock, give up */ - has_write_lock = 1; - } - if (_write_single_ready_buffer(fd, i) < 0) - break; - } - } - if (has_write_lock) - profbuf_write_lock = 0; -} - -static struct profbuf_s *reserve_buffer(int fd) -{ - /* Tries to enter a region of code that fills one buffer. If - successful, returns the profbuf_s. It fails only if the - concurrent buffers are all busy (extreme multithreaded usage). - - This might call write() to emit the data sitting in - previously-prepared buffers. In case of write() error, the - error is ignored but unwritten data stays in the buffers. 
- */ - long i; - - _write_ready_buffers(fd); - - for (i = 0; i < MAX_NUM_BUFFERS; i++) { - if (profbuf_state[i] == PROFBUF_UNUSED && - __sync_bool_compare_and_swap(&profbuf_state[i], PROFBUF_UNUSED, - PROFBUF_FILLING)) { - struct profbuf_s *p = &profbuf_all_buffers[i]; - p->data_size = 0; - p->data_offset = 0; - return p; - } - } - /* no unused buffer found */ - return NULL; -} - -static void commit_buffer(int fd, struct profbuf_s *buf) -{ - /* Leaves a region of code that filled 'buf'. - - This might call write() to emit the data now ready. In case of - write() error, the error is ignored but unwritten data stays in - the buffers. - */ - - /* Make sure every thread sees the full content of 'buf' */ - write_fence(); - - /* Then set the 'ready' flag */ - long i = buf - profbuf_all_buffers; - assert(profbuf_state[i] == PROFBUF_FILLING); - profbuf_state[i] = PROFBUF_READY; - - if (!__sync_bool_compare_and_swap(&profbuf_write_lock, 0, 1)) { - /* can't acquire the write lock, ignore */ - } - else { - _write_single_ready_buffer(fd, i); - profbuf_write_lock = 0; - } -} - -static void cancel_buffer(struct profbuf_s *buf) -{ - long i = buf - profbuf_all_buffers; - assert(profbuf_state[i] == PROFBUF_FILLING); - profbuf_state[i] = PROFBUF_UNUSED; -} - -static int shutdown_concurrent_bufs(int fd) -{ - /* no signal handler can be running concurrently here, because we - already did vmprof_ignore_signals(1) */ - assert(profbuf_write_lock == 0); - profbuf_write_lock = 2; - - /* last attempt to flush buffers */ - int i; - for (i = 0; i < MAX_NUM_BUFFERS; i++) { - while (profbuf_state[i] == PROFBUF_READY) { - if (_write_single_ready_buffer(fd, i) < 0) - return -1; - } - } - unprepare_concurrent_bufs(); - return 0; -} +int prepare_concurrent_bufs(void); +struct profbuf_s *reserve_buffer(int fd); +void commit_buffer(int fd, struct profbuf_s *buf); +void cancel_buffer(struct profbuf_s *buf); +int shutdown_concurrent_bufs(int fd); diff --git 
a/rpython/rlib/rvmprof/src/shared/vmprof_unix.c b/rpython/rlib/rvmprof/src/shared/vmprof_unix.c new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_unix.c @@ -0,0 +1,496 @@ +#include "vmprof_unix.h" + +#ifdef VMPROF_UNIX + +#if VMPROF_LINUX +#include +#endif + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vmp_stack.h" +#include "vmprof_mt.h" +#include "vmprof_getpc.h" +#include "vmprof_common.h" +#include "vmprof_memory.h" +#include "compat.h" + + + +/* value: LSB bit is 1 if signals must be ignored; all other bits + are a counter for how many threads are currently in a signal handler */ +static long volatile signal_handler_ignore = 1; +static long volatile signal_handler_entries = 0; +static char atfork_hook_installed = 0; +static volatile int spinlock; +static jmp_buf restore_point; +static struct profbuf_s *volatile current_codes; + + +void vmprof_ignore_signals(int ignored) +{ + if (ignored) { + /* set the last bit, and wait until concurrently-running signal + handlers finish */ + __sync_add_and_fetch(&signal_handler_ignore, 1L); + while (signal_handler_entries != 0L) { + usleep(1); + } + } else { + __sync_sub_and_fetch(&signal_handler_ignore, 1L); + } +} + +long vmprof_enter_signal(void) +{ + __sync_fetch_and_add(&signal_handler_entries, 1L); + return signal_handler_ignore; +} + +long vmprof_exit_signal(void) +{ + return __sync_sub_and_fetch(&signal_handler_entries, 1L); +} + +int install_pthread_atfork_hooks(void) { + /* this is needed to prevent the problems described there: + - http://code.google.com/p/gperftools/issues/detail?id=278 + - http://lists.debian.org/debian-glibc/2010/03/msg00161.html + + TL;DR: if the RSS of the process is large enough, the clone() syscall + will be interrupted by the SIGPROF before it can complete, then + retried, interrupted again and so on, in an endless loop. 
The + solution is to disable the timer around the fork, and re-enable it + only inside the parent. + */ + if (atfork_hook_installed) + return 0; + int ret = pthread_atfork(atfork_disable_timer, atfork_enable_timer, atfork_close_profile_file); + if (ret != 0) + return -1; + atfork_hook_installed = 1; + return 0; +} + +void segfault_handler(int arg) +{ + longjmp(restore_point, SIGSEGV); +} + +int _vmprof_sample_stack(struct profbuf_s *p, PY_THREAD_STATE_T * tstate, ucontext_t * uc) +{ + int depth; + struct prof_stacktrace_s *st = (struct prof_stacktrace_s *)p->data; + st->marker = MARKER_STACKTRACE; + st->count = 1; +#ifdef RPYTHON_VMPROF + depth = get_stack_trace(get_vmprof_stack(), st->stack, MAX_STACK_DEPTH-1, (intptr_t)GetPC(uc)); +#else + depth = get_stack_trace(tstate, st->stack, MAX_STACK_DEPTH-1, (intptr_t)NULL); +#endif + // useful for tests (see test_stop_sampling) +#ifndef RPYTHON_LL2CTYPES + if (depth == 0) { + return 0; + } +#endif + st->depth = depth; + st->stack[depth++] = tstate; + long rss = get_current_proc_rss(); + if (rss >= 0) + st->stack[depth++] = (void*)rss; + p->data_offset = offsetof(struct prof_stacktrace_s, marker); + p->data_size = (depth * sizeof(void *) + + sizeof(struct prof_stacktrace_s) - + offsetof(struct prof_stacktrace_s, marker)); + return 1; +} + +#ifndef RPYTHON_VMPROF +PY_THREAD_STATE_T * _get_pystate_for_this_thread(void) { + // see issue 116 on github.com/vmprof/vmprof-python. 
+ // PyGILState_GetThisThreadState(); can hang forever + // + PyInterpreterState * istate; + PyThreadState * state; + long mythread_id; + + mythread_id = PyThread_get_thread_ident(); + istate = PyInterpreterState_Head(); + if (istate == NULL) { + fprintf(stderr, "WARNING: interp state head is null (for thread id %ld)\n", mythread_id); + return NULL; + } + // fish fish fish, it will NOT lock the keymutex in pythread + do { + state = PyInterpreterState_ThreadHead(istate); + do { + if (state->thread_id == mythread_id) { + return state; + } + } while ((state = PyThreadState_Next(state)) != NULL); + } while ((istate = PyInterpreterState_Next(istate)) != NULL); + + // uh? not found? + fprintf(stderr, "WARNING: cannot find thread state (for thread id %ld), sample will be thrown away\n", mythread_id); + return NULL; +} +#endif + +void flush_codes(void) +{ + struct profbuf_s *p = current_codes; + if (p != NULL) { + current_codes = NULL; + commit_buffer(vmp_profile_fileno(), p); + } +} + +void set_current_codes(void * to) { + current_codes = to; +} + +#endif + +void vmprof_aquire_lock(void) { + while (__sync_lock_test_and_set(&spinlock, 1)) { + } +} + +void vmprof_release_lock(void) { + __sync_lock_release(&spinlock); +} + +void sigprof_handler(int sig_nr, siginfo_t* info, void *ucontext) +{ + int commit; + PY_THREAD_STATE_T * tstate = NULL; + void (*prevhandler)(int); + +#ifndef RPYTHON_VMPROF + + // Even though the docs say that this function call is for 'esoteric use' + // it seems to be correctly set when the interpreter is teared down! + if (!Py_IsInitialized()) { + return; + } + + // TERRIBLE HACK AHEAD + // on OS X, the thread local storage is sometimes uninitialized + // when the signal handler runs - it means it's impossible to read errno + // or call any syscall or read PyThread_Current or pthread_self. Additionally, + // it seems impossible to read the register gs. 
+ // here we register segfault handler (all guarded by a spinlock) and call + // longjmp in case segfault happens while reading a thread local + // + // We do the same error detection for linux to ensure that + // get_current_thread_state returns a sane result + while (__sync_lock_test_and_set(&spinlock, 1)) { + } + +#ifdef VMPROF_UNIX + // SIGNAL ABUSE AHEAD + // On linux, the prof timer will deliver the signal to the thread which triggered the timer, + // because these timers are based on process and system time, and as such, are thread-aware. + // For the real timer, the signal gets delivered to the main thread, seemingly always. + // Consequently if we want to sample multiple threads, we need to forward this signal. + if (vmprof_get_signal_type() == SIGALRM) { + if (is_main_thread() && broadcast_signal_for_threads()) { + __sync_lock_release(&spinlock); + return; + } + } +#endif + + prevhandler = signal(SIGSEGV, &segfault_handler); + int fault_code = setjmp(restore_point); + if (fault_code == 0) { + pthread_self(); + tstate = _get_pystate_for_this_thread(); + } else { + signal(SIGSEGV, prevhandler); + __sync_lock_release(&spinlock); + return; + } + signal(SIGSEGV, prevhandler); + __sync_lock_release(&spinlock); +#endif + + long val = vmprof_enter_signal(); + + if (val == 0) { + int saved_errno = errno; + int fd = vmp_profile_fileno(); + assert(fd >= 0); + + struct profbuf_s *p = reserve_buffer(fd); + if (p == NULL) { + /* ignore this signal: there are no free buffers right now */ + } else { +#ifdef RPYTHON_VMPROF + commit = _vmprof_sample_stack(p, NULL, (ucontext_t*)ucontext); +#else + commit = _vmprof_sample_stack(p, tstate, (ucontext_t*)ucontext); +#endif + if (commit) { + commit_buffer(fd, p); + } else { +#ifndef RPYTHON_VMPROF + fprintf(stderr, "WARNING: canceled buffer, no stack trace was written\n"); +#else + fprintf(stderr, "WARNING: canceled buffer, no stack trace was written\n"); +#endif + cancel_buffer(p); + } + } + + errno = saved_errno; + } + + 
vmprof_exit_signal(); +} + +int install_sigprof_handler(void) +{ + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_sigaction = sigprof_handler; + sa.sa_flags = SA_RESTART | SA_SIGINFO; + if (sigemptyset(&sa.sa_mask) == -1 || + sigaction(vmprof_get_signal_type(), &sa, NULL) == -1) + return -1; + return 0; +} + +int remove_sigprof_handler(void) +{ + struct sigaction ign_sigint, prev; + ign_sigint.sa_handler = SIG_IGN; + ign_sigint.sa_flags = 0; + sigemptyset(&ign_sigint.sa_mask); + + if (sigaction(vmprof_get_signal_type(), &ign_sigint, NULL) < 0) { + fprintf(stderr, "Could not remove the signal handler (for profiling)\n"); + return -1; + } + return 0; +} + +int install_sigprof_timer(void) +{ + static struct itimerval timer; + timer.it_interval.tv_sec = 0; + timer.it_interval.tv_usec = (int)vmprof_get_profile_interval_usec(); + timer.it_value = timer.it_interval; + if (setitimer(vmprof_get_itimer_type(), &timer, NULL) != 0) + return -1; + return 0; +} + +int remove_sigprof_timer(void) +{ + static struct itimerval timer; + timerclear(&(timer.it_interval)); + timerclear(&(timer.it_value)); + if (setitimer(vmprof_get_itimer_type(), &timer, NULL) != 0) { + fprintf(stderr, "Could not disable the signal handler (for profiling)\n"); + return -1; + } + return 0; +} + +void atfork_disable_timer(void) +{ + if (vmprof_get_profile_interval_usec() > 0) { + remove_sigprof_timer(); + vmprof_set_enabled(0); + } +} + +void atfork_close_profile_file(void) +{ + int fd = vmp_profile_fileno(); + if (fd != -1) + close(fd); + vmp_set_profile_fileno(-1); +} +void atfork_enable_timer(void) +{ + if (vmprof_get_profile_interval_usec() > 0) { + install_sigprof_timer(); + vmprof_set_enabled(1); + } +} + +#ifdef VMP_SUPPORTS_NATIVE_PROFILING +void init_cpyprof(int native) +{ + // skip this if native should not be enabled + if (!native) { + vmp_native_disable(); + return; + } + vmp_native_enable(); +} + +static void disable_cpyprof(void) +{ + vmp_native_disable(); +} +#endif + +int 
vmprof_enable(int memory, int native, int real_time) +{ +#ifdef VMP_SUPPORTS_NATIVE_PROFILING + init_cpyprof(native); +#endif + assert(vmp_profile_fileno() >= 0); + assert(vmprof_get_prepare_interval_usec() > 0); + vmprof_set_profile_interval_usec(vmprof_get_prepare_interval_usec()); + if (memory && setup_rss() == -1) + goto error; +#if VMPROF_UNIX + if (real_time && insert_thread(pthread_self(), -1) == -1) + goto error; +#endif + if (install_pthread_atfork_hooks() == -1) + goto error; + if (install_sigprof_handler() == -1) + goto error; + if (install_sigprof_timer() == -1) + goto error; + vmprof_ignore_signals(0); + return 0; + + error: + vmp_set_profile_fileno(-1); + vmprof_set_profile_interval_usec(0); + return -1; +} + + +int close_profile(void) +{ + int fileno = vmp_profile_fileno(); + fsync(fileno); + (void)vmp_write_time_now(MARKER_TRAILER); + teardown_rss(); + + /* don't close() the file descriptor from here */ + vmp_set_profile_fileno(-1); + return 0; +} + +int vmprof_disable(void) +{ + vmprof_ignore_signals(1); + vmprof_set_profile_interval_usec(0); +#ifdef VMP_SUPPORTS_NATIVE_PROFILING + disable_cpyprof(); +#endif + + if (remove_sigprof_timer() == -1) { + return -1; + } + if (remove_sigprof_handler() == -1) { + return -1; + } +#ifdef VMPROF_UNIX + if ((vmprof_get_signal_type() == SIGALRM) && remove_threads() == -1) { + return -1; + } +#endif + flush_codes(); + if (shutdown_concurrent_bufs(vmp_profile_fileno()) < 0) + return -1; + return close_profile(); +} + +int vmprof_register_virtual_function(char *code_name, intptr_t code_uid, + int auto_retry) +{ + long namelen = strnlen(code_name, 1023); + long blocklen = 1 + sizeof(intptr_t) + sizeof(long) + namelen; + struct profbuf_s *p; + char *t; + + retry: + p = current_codes; + if (p != NULL) { + if (__sync_bool_compare_and_swap(¤t_codes, p, NULL)) { + /* grabbed 'current_codes': we will append the current block + to it if it contains enough room */ + size_t freesize = SINGLE_BUF_SIZE - p->data_size; + if 
(freesize < (size_t)blocklen) { + /* full: flush it */ + commit_buffer(vmp_profile_fileno(), p); + p = NULL; + } + } + else { + /* compare-and-swap failed, don't try again */ + p = NULL; + } + } + + if (p == NULL) { + p = reserve_buffer(vmp_profile_fileno()); + if (p == NULL) { + /* can't get a free block; should almost never be the + case. Spin loop if allowed, or return a failure code + if not (e.g. we're in a signal handler) */ + if (auto_retry > 0) { + auto_retry--; + usleep(1); + goto retry; + } + return -1; + } + } + + t = p->data + p->data_size; + p->data_size += blocklen; + assert(p->data_size <= SINGLE_BUF_SIZE); + *t++ = MARKER_VIRTUAL_IP; + memcpy(t, &code_uid, sizeof(intptr_t)); t += sizeof(intptr_t); + memcpy(t, &namelen, sizeof(long)); t += sizeof(long); + memcpy(t, code_name, namelen); + + /* try to reattach 'p' to 'current_codes' */ + if (!__sync_bool_compare_and_swap(¤t_codes, NULL, p)) { + /* failed, flush it */ + commit_buffer(vmp_profile_fileno(), p); + } + return 0; +} + +int get_stack_trace(PY_THREAD_STATE_T * current, void** result, int max_depth, intptr_t pc) +{ + PY_STACK_FRAME_T * frame; +#ifdef RPYTHON_VMPROF + // do nothing here, + frame = (PY_STACK_FRAME_T*)current; +#else + if (current == NULL) { + fprintf(stderr, "WARNING: get_stack_trace, current is NULL\n"); + return 0; + } + frame = current->frame; +#endif + if (frame == NULL) { + fprintf(stderr, "WARNING: get_stack_trace, frame is NULL\n"); + return 0; + } + return vmp_walk_and_record_stack(frame, result, max_depth, 1, pc); +} diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_unix.h b/rpython/rlib/rvmprof/src/shared/vmprof_unix.h new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_unix.h @@ -0,0 +1,86 @@ +#pragma once + +/* VMPROF + * + * statistical sampling profiler specifically designed to profile programs + * which run on a Virtual Machine and/or bytecode interpreter, such as Python, + * etc. 
+ * + * The logic to dump the C stack traces is partly stolen from the code in + * gperftools. + * The file "getpc.h" has been entirely copied from gperftools. + * + * Tested only on gcc, linux, x86_64. + * + * Copyright (C) 2014-2017 + * Antonio Cuni - anto.cuni at gmail.com + * Maciej Fijalkowski - fijall at gmail.com + * Armin Rigo - arigo at tunes.org + * Richard Plangger - planrichi at gmail.com + * + */ + +#include "vmprof.h" + +#include "vmprof_mt.h" + +#include + +RPY_EXTERN void vmprof_ignore_signals(int ignored); +RPY_EXTERN long vmprof_enter_signal(void); +RPY_EXTERN long vmprof_exit_signal(void); + +/* ************************************************************* + * functions to dump the stack trace + * ************************************************************* + */ + +#ifndef RPYTHON_VMPROF +PY_THREAD_STATE_T * _get_pystate_for_this_thread(void); +#endif +int get_stack_trace(PY_THREAD_STATE_T * current, void** result, int max_depth, intptr_t pc); + +/* ************************************************************* + * the signal handler + * ************************************************************* + */ + +#include + +void segfault_handler(int arg); +int _vmprof_sample_stack(struct profbuf_s *p, PY_THREAD_STATE_T * tstate, ucontext_t * uc); +void sigprof_handler(int sig_nr, siginfo_t* info, void *ucontext); + + +/* ************************************************************* + * the setup and teardown functions + * ************************************************************* + */ + +int install_sigprof_handler(void); +int remove_sigprof_handler(void); +int install_sigprof_timer(void); +int remove_sigprof_timer(void); +void atfork_disable_timer(void); +void atfork_enable_timer(void); +void atfork_close_profile_file(void); +int install_pthread_atfork_hooks(void); + +#ifdef VMP_SUPPORTS_NATIVE_PROFILING +void init_cpyprof(int native); +static void disable_cpyprof(void); +#endif + +int close_profile(void); + +RPY_EXTERN +int vmprof_enable(int 
memory, int native, int real_time); +RPY_EXTERN +int vmprof_disable(void); +RPY_EXTERN +int vmprof_register_virtual_function(char *code_name, intptr_t code_uid, + int auto_retry); + + +void vmprof_aquire_lock(void); +void vmprof_release_lock(void); diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_win.c b/rpython/rlib/rvmprof/src/shared/vmprof_win.c new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_win.c @@ -0,0 +1,42 @@ +// cannot include this header because it also has definitions +#include "windows.h" +#include "compat.h" +#include "vmp_stack.h" + +HANDLE write_mutex; + +int prepare_concurrent_bufs(void) +{ + if (!(write_mutex = CreateMutex(NULL, FALSE, NULL))) + return -1; + return 0; +} + +#include + +int vmp_write_all(const char *buf, size_t bufsize) +{ + int res; + int fd; + int count; + + res = WaitForSingleObject(write_mutex, INFINITE); + fd = vmp_profile_fileno(); + + if (fd == -1) { + ReleaseMutex(write_mutex); + return -1; + } + while (bufsize > 0) { + count = _write(fd, buf, (long)bufsize); + if (count <= 0) { + ReleaseMutex(write_mutex); + return -1; /* failed */ + } + buf += count; + bufsize -= count; + } + ReleaseMutex(write_mutex); + return 0; +} + diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_win.h b/rpython/rlib/rvmprof/src/shared/vmprof_win.h new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/shared/vmprof_win.h @@ -0,0 +1,203 @@ +#pragma once + +#include "windows.h" +#include "compat.h" +#include "vmp_stack.h" + +HANDLE write_mutex; + +int prepare_concurrent_bufs(void); + +#include "vmprof_common.h" +#include From pypy.commits at gmail.com Tue Jul 25 12:02:12 2017 From: pypy.commits at gmail.com (arigo) Date: Tue, 25 Jul 2017 09:02:12 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Fix for edc44ccff552: the previous fix in clibffi had no effect Message-ID: <59776b84.8dd81c0a.b3c2c.e112@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r91969:a24d6c7000c8 Date: 
2017-07-25 17:58 +0200 http://bitbucket.org/pypy/pypy/changeset/a24d6c7000c8/ Log: Fix for edc44ccff552: the previous fix in clibffi had no effect diff --git a/rpython/memory/gctransform/support.py b/rpython/memory/gctransform/support.py --- a/rpython/memory/gctransform/support.py +++ b/rpython/memory/gctransform/support.py @@ -1,4 +1,5 @@ from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.annotator import model as annmodel import os @@ -77,23 +78,19 @@ from rpython.rlib.rposix import c_write return c_write(fd, string, len(string)) -def destructor_failed(typename, e): - try: - write(2, "a destructor of type ") - write(2, typename) - write(2, " raised an exception ") - write(2, str(e)) - write(2, " ignoring it\n") - except: - pass -destructor_failed._dont_inline_ = True - def ll_call_destructor(destrptr, destr_v, typename): + llop.revdb_do_next_call(lltype.Void) try: destrptr(destr_v) except Exception as e: - destructor_failed(typename, e) -ll_call_destructor._revdb_do_all_calls_ = True + try: + write(2, "a destructor of type ") + write(2, typename) + write(2, " raised an exception ") + write(2, str(e)) + write(2, " ignoring it\n") + except: + pass def ll_report_finalizer_error(e): try: diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -4,6 +4,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.tool import rffi_platform from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.rarithmetic import intmask, is_emulated_long @@ -419,8 +420,8 @@ (what the real callback is for example), casted to VOIDP """ userdata = rffi.cast(USERDATA_P, ll_userdata) + llop.revdb_do_next_call(lltype.Void) userdata.callback(ll_args, ll_res, userdata) 
-_ll_callback._revdb_do_all_calls_ = True def ll_callback(ffi_cif, ll_res, ll_args, ll_userdata): rposix._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -593,6 +593,7 @@ 'revdb_dtoa': LLOp(sideeffects=False), 'revdb_modf': LLOp(sideeffects=False), 'revdb_frexp': LLOp(sideeffects=False), + 'revdb_do_next_call': LLOp(), } # ***** Run test_lloperation after changes. ***** diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -713,6 +713,10 @@ return gencsupp.cast_gcptr_to_int(self, op) return self.OP_CAST_POINTER(op) + def OP_REVDB_DO_NEXT_CALL(self, op): + self.revdb_do_next_call = True + return "/* revdb_do_next_call */" + def OP_LENGTH_OF_SIMPLE_GCARRAY_FROM_OPAQUE(self, op): return ('%s = *(long *)(((char *)%s) + sizeof(struct pypy_header0));' ' /* length_of_simple_gcarray_from_opaque */' diff --git a/rpython/translator/revdb/gencsupp.py b/rpython/translator/revdb/gencsupp.py --- a/rpython/translator/revdb/gencsupp.py +++ b/rpython/translator/revdb/gencsupp.py @@ -78,10 +78,11 @@ return 'RPY_REVDB_EMIT(%s, %s, %s);' % (normal_code, cdecl(tp, '_e'), value) def emit_residual_call(funcgen, call_code, v_result, expr_result): - if getattr(getattr(funcgen.graph, 'func', None), - '_revdb_do_all_calls_', False): + if hasattr(funcgen, 'revdb_do_next_call'): + del funcgen.revdb_do_next_call return call_code # a hack for ll_call_destructor() to mean - # that the calls should really be done + # that the calls should really be done. + # Also used in rpython.rlib.clibffi. 
# if call_code in ('RPyGilAcquire();', 'RPyGilRelease();'): # Could also work with a regular RPY_REVDB_CALL_VOID, but we From pypy.commits at gmail.com Tue Jul 25 12:15:05 2017 From: pypy.commits at gmail.com (arigo) Date: Tue, 25 Jul 2017 09:15:05 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Added tag RevDB-pypy2.7-v5.6.2 for changeset a24d6c7000c8 Message-ID: <59776e89.cea2df0a.5cf88.8b0c@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r91970:b0b66add46af Date: 2017-07-25 18:14 +0200 http://bitbucket.org/pypy/pypy/changeset/b0b66add46af/ Log: Added tag RevDB-pypy2.7-v5.6.2 for changeset a24d6c7000c8 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -38,3 +38,4 @@ d7724c0a5700b895a47de44074cdf5fd659a988f RevDB-pypy2.7-v5.4.1 aff251e543859ce4508159dd9f1a82a2f553de00 release-pypy2.7-v5.6.0 e90317857d27917bf840caf675832292ee070510 RevDB-pypy2.7-v5.6.1 +a24d6c7000c8099c73d3660857f7e3cee5ac045c RevDB-pypy2.7-v5.6.2 From pypy.commits at gmail.com Tue Jul 25 14:20:49 2017 From: pypy.commits at gmail.com (rlamy) Date: Tue, 25 Jul 2017 11:20:49 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: A failing test that explains why test_subclass() leaks the class Sub Message-ID: <59778c01.0387df0a.669fe.1276@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91971:82d95ae3d2c8 Date: 2017-07-25 20:19 +0200 http://bitbucket.org/pypy/pypy/changeset/82d95ae3d2c8/ Log: A failing test that explains why test_subclass() leaks the class Sub diff --git a/pypy/module/cpyext/test/array.c b/pypy/module/cpyext/test/array.c --- a/pypy/module/cpyext/test/array.c +++ b/pypy/module/cpyext/test/array.c @@ -2468,6 +2468,15 @@ Py_RETURN_NONE; } +static PyObject * +same_dealloc(PyObject *self, PyObject *args) +{ + PyObject *obj1, *obj2; + if (!PyArg_ParseTuple(args, "OO", &obj1, &obj2)) { + return NULL; + } + return PyLong_FromLong(obj1->ob_type->tp_dealloc == obj2->ob_type->tp_dealloc); +} /*********************** 
Install Module **************************/ @@ -2477,6 +2486,7 @@ {"readbuffer_as_string", (PyCFunction)readbuffer_as_string, METH_VARARGS, NULL}, {"get_releasebuffer_cnt", (PyCFunction)get_releasebuffer_cnt, METH_NOARGS, NULL}, {"create_and_release_buffer", (PyCFunction)create_and_release_buffer, METH_O, NULL}, + {"same_dealloc", (PyCFunction)same_dealloc, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -111,6 +111,18 @@ res = [1, 2, 3] * arr assert res == [2, 4, 6] + def test_subclass_dealloc(self): + module = self.import_module(name='array') + class Sub(module.array): + pass + + arr = Sub('i', [2]) + module.readbuffer_as_string(arr) + class A(object): + pass + assert not module.same_dealloc(arr, module.array('i', [2])) + assert module.same_dealloc(arr, A()) + def test_subclass(self): import struct module = self.import_module(name='array') From pypy.commits at gmail.com Wed Jul 26 07:03:27 2017 From: pypy.commits at gmail.com (Dodan) Date: Wed, 26 Jul 2017 04:03:27 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-sendmsg-recvmsg: Tests in regrtest/test_socket pass successfully. Memory leaks fixed Message-ID: <597876ff.c7331c0a.1dac4.b5a6@mx.google.com> Author: Dodan Mihai Branch: py3.5-sendmsg-recvmsg Changeset: r91972:aaa84a0a699e Date: 2017-07-26 13:59 +0300 http://bitbucket.org/pypy/pypy/changeset/aaa84a0a699e/ Log: Tests in regrtest/test_socket pass successfully. Memory leaks fixed diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -329,6 +329,13 @@ @unwrap_spec(size=int) def CMSG_SPACE(space, size): + """ + Socket method to determine the optimal byte size of the ancillary. 
+ Recommended to be used when computing the ancillary size for recvmsg. + :param space: + :param size: an integer with the minimum size required. + :return: an integer with the minimum memory needed for the required size. The value is memory alligned + """ if size < 0: raise oefmt(space.w_OverflowError, "CMSG_SPACE() argument out of range") @@ -340,6 +347,13 @@ @unwrap_spec(len=int) def CMSG_LEN(space, len): + """ + Socket method to determine the optimal byte size of the ancillary. + Recommended to be used when computing the ancillary size for recvmsg. + :param space: + :param len: an integer with the minimum size required. + :return: an integer with the minimum memory needed for the required size. The value is not mem alligned. + """ if len < 0: raise oefmt(space.w_OverflowError, "CMSG_LEN() argument out of range") diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -450,6 +450,25 @@ @unwrap_spec(message_size=int, ancbufsize=int, flags=int) def recvmsg_w(self,space,message_size, ancbufsize = 0, flags = 0): + """ + recvfrom(message_size[, ancbufsize[, flags]]) -> (message, ancillary, flags, address) + recvmsg(message_size, [ancbufsize,[flags]]) -> (message, ancillary, flags, address) + Receive normal data (up to bufsize bytes) and ancillary data from the socket. + The ancbufsize argument sets the size in bytes of the internal buffer used to receive the ancillary data; + it defaults to 0, meaning that no ancillary data will be received. + Appropriate buffer sizes for ancillary data can be calculated using CMSG_SPACE() or CMSG_LEN(), + and items which do not fit into the buffer might be truncated or discarded. + The flags argument defaults to 0 and has the same meaning as for recv(). 
+ The ancdata item is a list of zero or more tuples (cmsg_level, cmsg_type, cmsg_data): + cmsg_level and cmsg_type are integers specifying the protocol level and protocol-specific type respectively, + and cmsg_data is a bytes object holding the associated data. + + :param space: Non useable parameter. It represents the object space. + :param message_size: Maximum size of the message to be received + :param ancbufsize: Maximum size of the ancillary data to be received + :param flags: Receive flag. For more details, please check the Unix manual + :return: a tuple consisting of the message, the ancillary data, return flag and the address. + """ if (message_size < 0): raise oefmt(space.w_ValueError, "negative buffer size in recvmsg()") if ancbufsize < 0: @@ -476,10 +495,6 @@ except SocketError as e: converted_error(space, e, eintr_retry=True) - - - - @unwrap_spec(data='bufferstr', flags=int) def send_w(self, space, data, flags=0): """send(data[, flags]) -> count @@ -535,10 +550,22 @@ converted_error(space, e, eintr_retry=True) return space.newint(count) - #@unwrap_spec(data='bufferstr', flags = int) def sendmsg_w(self, space, w_data, w_ancillary=None, w_flags=None ,w_address=None): - """sendmsg(messages, [ancillaries, [flags, [address]]]) """ + sendmsg(data[,ancillary[,flags[,address]]]) -> bytes_sent + Send normal and ancillary data to the socket, gathering the non-ancillary data + from a series of buffers and concatenating it into a single message. + The ancdata argument specifies the ancillary data (control messages) as an iterable of zero or more tuples + (cmsg_level, cmsg_type, cmsg_data), where cmsg_level and cmsg_type are integers specifying the protocol level + and protocol-specific type respectively, and cmsg_data is a bytes-like object holding the associated data. + :param space: Represents the object space. + :param w_data: The message(s). needs to be a bytes like object + :param w_ancillary: needs to be a sequence object Can remain unspecified. 
+ :param w_flags: needs to be an integer. Can remain unspecified. + :param w_address: needs to be a bytes-like object Can remain unspecified. + :return: Bytes sent from the message + """ + # Get the flag and address from the object space flags = 0 if space.is_none(w_flags) is False: flags = space.int_w(w_flags) @@ -547,6 +574,7 @@ if space.is_none(w_address) is False: address = self.addr_from_object(space, w_address) + # find data's type in the ObjectSpace and get a list of string out of it. data = [] if (w_data.typedef.name == 'list'): for i in w_data.getitems(): @@ -574,8 +602,9 @@ if not e.match(space,space.w_StopIteration): raise break + + # find the ancillary's type in the ObjectSpace and get a list of tuples out of it. ancillary = [] - if w_ancillary is not None: if (space.isinstance_w(w_ancillary,space.w_list)): for i in w_ancillary.getitems(): @@ -594,19 +623,22 @@ tup = (level, type, cont) ancillary.append(tup) else: - raise oefmt(space.w_TypeError,"[sendmsg() ancillary data items]() argument must be sequence of length 3") + raise oefmt(space.w_TypeError, + "[sendmsg() ancillary data items]() argument must be sequence of length 3") else: while True: try: if (space.is_generator(w_ancillary) is False): - raise oefmt(space.w_TypeError,"[sendmsg() ancillary data items]() argument must be sequence") + raise oefmt(space.w_TypeError, + "[sendmsg() ancillary data items]() argument must be sequence") i = space.next(w_ancillary) if (space.isinstance_w(i, space.w_tuple) is False): raise oefmt(space.w_TypeError, "[sendmsg() ancillary data items]() argument must be sequence of length 3") if (space.len_w(i) != 3): - raise oefmt(space.w_TypeError,"[sendmsg() ancillary data items]() argument must be sequence of length 3") + raise oefmt(space.w_TypeError, + "[sendmsg() ancillary data items]() argument must be sequence of length 3") except OperationError as e: if not e.match(space,space.w_StopIteration): raise diff --git a/rpython/rlib/_rsocket_rffi.py 
b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -357,37 +357,37 @@ 'errno.h', 'limits.h', 'stdio.h', - 'sys/types.h'] + 'sys/types.h', + 'netinet/in.h', + 'arpa/inet.h'] separate_module_sources = [''' - //defines for recvmsg - #define SUCCESS 0 - #define BAD_MSG_SIZE_GIVEN -1 - #define BAD_ANC_SIZE_GIVEN -2 - #define WOULD_BLOCK -3 - #define AGAIN -4 - #define BADDESC -5 - #define CON_REF -6 - #define FAULT -7 - #define INTR -8 - #define NOMEM -9 - #define NOTCONN -10 - #define NOTSOCK -11 - #define MAL_ANC -12 + // special defines for returning from recvmsg + #define BAD_MSG_SIZE_GIVEN -10000 + #define BAD_ANC_SIZE_GIVEN -10001 + #define MAL_ANC -10002 - //defines for sendmsg + // special defines for returning from sendmsg #define MUL_MSGS_NOT_SUP -1000 #define ANC_DATA_TOO_LARGE -1001 #define ANC_DATA_TOO_LARGEX -1002 - #define MSG_IOVLEN 1 // CPyhton has hardcoded this as well. + /* + Even though you could, theoretically, receive more than one message, IF you set the socket option, + CPython has hardcoded the message number to 1, and implemented the option to receive more then 1 in a + different socket method: recvmsg_into + */ + #define MSG_IOVLEN 1 // CPython has hardcoded this as well. #if INT_MAX > 0x7fffffff #define SOCKLEN_T_LIMIT 0x7fffffff #else #define SOCKLEN_T_LIMIT INT_MAX #endif + // ################################################################################################ + // Recvmsg implementation and associated functions + // Taken from CPython. Determines the minimum memory space required for the ancillary data. #ifdef CMSG_SPACE static int cmsg_min_space(struct msghdr *msg, struct cmsghdr *cmsgh, size_t space) @@ -428,8 +428,8 @@ } #endif + // Taken from CPython. #ifdef CMSG_LEN - /* If pointer CMSG_DATA(cmsgh) is in buffer msg->msg_control, set *space to number of bytes following it in the buffer and return true; otherwise, return false. 
Assumes cmsgh, msg->msg_control and @@ -449,6 +449,7 @@ return 1; } + // Taken from CPython. /* If cmsgh is invalid or not contained in the buffer pointed to by msg->msg_control, return -1. If cmsgh is valid and its associated data is entirely contained in the buffer, set *data_len to the @@ -476,23 +477,35 @@ } #endif /* CMSG_LEN */ + /* + Structure meant to hold the information received after a recvmsg is performed. + Essentially it holds: the address, the message, the ancillary data and the return flags. + I use this structure for 2 main reasons: + - keep things ordered + - some of the ancillary parameters need to be int not long (rffi SignedP is actually long*), + therefore I cannot use the parameters directly + */ struct recvmsg_info { - int error_code; - struct sockaddr* address; + struct sockaddr* address; // address fields socklen_t addrlen; - int* length_of_messages; + int* length_of_messages; // message fields char** messages; int no_of_messages; - int size_of_ancillary; + int size_of_ancillary; // ancillary fields int* levels; int* types; char** file_descr; int* descr_per_ancillary; - int flags; + int retflag; // return flag field }; - + /* + Wrapper function over recvmsg. Since it returns a lot of data, + in a structure that is hard to parse in rffi, it was implemented in C. + All the parameters, save the socket fd, message_size, ancillary_size + will be malloc'd and/or modified. + */ RPY_EXTERN int recvmsg_implementation( int socket_fd, @@ -505,11 +518,11 @@ char** messages, int* no_of_messages, int* size_of_ancillary, - int** levels, - int** types, + long** levels, + long** types, char** file_descr, - int** descr_per_ancillary, - int* flag) + long** descr_per_ancillary, + int* retflag) { @@ -521,35 +534,36 @@ int cmsg_status; struct iovec iov; struct recvmsg_info* retinfo; - int error_flag; + int error_flag; // variable to be set in case of special errors. 
int cmsgdatalen = 0; - //allocation flags for failure + // variables that are set to 1, if the message charp has been allocated + // and if the ancillary variables have been allocated. To be used in case of failure. int iov_alloc = 0; int anc_alloc = 0; retinfo = (struct recvmsg_info*) malloc(sizeof(struct recvmsg_info)); - /* - if (message_size < 0){ - error_flag = BAD_MSG_SIZE_GIVEN; - goto fail; - } - */ + if (ancillary_size > SOCKLEN_T_LIMIT){ error_flag = BAD_ANC_SIZE_GIVEN; goto fail; } - + // Setup the messages iov struct memory iov.iov_base = (char*) malloc(message_size); memset(iov.iov_base, 0, message_size); iov.iov_len = message_size; + + // Setup the ancillary buffer memory controlbuf = malloc(ancillary_size); - recvd_addrlen = sizeof(struct sockaddr); + + // Setup the recv address memory + recvd_addrlen = sizeof(struct sockaddr_storage); recvd_address = (struct sockaddr*) malloc(recvd_addrlen); memset(recvd_address, 0,recvd_addrlen); + // Setup the msghdr struct msg.msg_name = recvd_address; msg.msg_namelen = recvd_addrlen; msg.msg_iov = &iov; @@ -557,64 +571,32 @@ msg.msg_control = controlbuf; msg.msg_controllen = ancillary_size; + // Link my structure to the msghdr fields retinfo->address = msg.msg_name; retinfo->length_of_messages = (int*) malloc (MSG_IOVLEN * sizeof(int)); - retinfo->no_of_messages = 1; + retinfo->no_of_messages = MSG_IOVLEN; retinfo->messages = (char**) malloc (MSG_IOVLEN * sizeof(char*)); retinfo->messages[0] = msg.msg_iov->iov_base; iov_alloc = 1; - ssize_t bytes_recvd = 0; bytes_recvd = recvmsg(socket_fd, &msg, flags); if (bytes_recvd < 0){ - switch (errno){ - case EAGAIN: - error_flag = -3; - break; - case EBADF: - error_flag = -5; - break; - case ECONNREFUSED: - error_flag = -6; - break; - case EFAULT: - error_flag = -7; - break; - case EINTR: - error_flag = -8; - break; - case ENOMEM: - error_flag = -9; - break; - case ENOTCONN: - error_flag = -10; - break; - case ENOTSOCK: - error_flag = -11; - break; - } - goto fail; } 
retinfo->addrlen = (socklen_t) msg.msg_namelen; retinfo->length_of_messages[0] = msg.msg_iov->iov_len; - + // Count the ancillary items & allocate the memory int anc_counter = 0; - /* - struct recv_list* first_item = (struct recv_list*) malloc(sizeof(struct recv_list)); - struct recv_list* iter = first_item; - */ for (cmsgh = ((msg.msg_controllen > 0) ? CMSG_FIRSTHDR(&msg) : NULL); cmsgh != NULL; cmsgh = CMSG_NXTHDR(&msg, cmsgh)) { anc_counter++; } - retinfo->size_of_ancillary = anc_counter; retinfo->file_descr = (char**) malloc (anc_counter * sizeof(char*)); retinfo->levels = (int*) malloc(anc_counter * sizeof(int)); @@ -622,6 +604,7 @@ retinfo->descr_per_ancillary = (int*) malloc(anc_counter * sizeof(int)); anc_alloc = 1; + // Extract the ancillary items int i=0; for (cmsgh = ((msg.msg_controllen > 0) ? CMSG_FIRSTHDR(&msg) : NULL); cmsgh != NULL; cmsgh = CMSG_NXTHDR(&msg, cmsgh)) { @@ -639,26 +622,20 @@ i++; } - retinfo->flags = msg.msg_flags; - retinfo->error_code = 0; + retinfo->retflag = msg.msg_flags; - //address = (struct sockaddr*) malloc (sizeof(struct sockaddr)); - memcpy(address,retinfo->address,sizeof(struct sockaddr)); + // Set the parameters of address + memcpy(address,retinfo->address,retinfo->addrlen); + *addrlen = retinfo->addrlen; - - *addrlen = retinfo->addrlen; + // Set the parameters of message *no_of_messages = retinfo->no_of_messages; *size_of_ancillary = retinfo->size_of_ancillary; - *length_of_messages = (int*) malloc (sizeof(int) * retinfo->no_of_messages); - //*length_of_messages = memcpy(*length_of_messages, retinfo->length_of_messages, sizeof(int) * retinfo->no_of_messages); - int counter = 0; for (i=0; i< retinfo->no_of_messages; i++) counter += retinfo->length_of_messages[i]; - - //*messages = (char*) malloc(sizeof(char) * counter); memset(*messages, 0, sizeof(char) * counter); counter = 0; for(i=0; i< retinfo->no_of_messages; i++){ @@ -666,20 +643,18 @@ counter += retinfo->length_of_messages[i]; } - *levels = (int*) malloc 
(sizeof(int) * retinfo->size_of_ancillary); - //*levels = - memcpy(*levels, retinfo->levels, sizeof(int) * retinfo->size_of_ancillary); - *types = (int*) malloc (sizeof(int) * retinfo->size_of_ancillary); - //*types = - memcpy(*types, retinfo->types, sizeof(int) * retinfo->size_of_ancillary); - *descr_per_ancillary = (int*) malloc (sizeof(int) * retinfo->size_of_ancillary); - //*descr_per_ancillary = - memcpy(*descr_per_ancillary, retinfo->descr_per_ancillary, sizeof(int) * retinfo->size_of_ancillary); - + // Set the parameters of ancillary + *levels = (long*) malloc (sizeof(long) * retinfo->size_of_ancillary); + *types = (long*) malloc (sizeof(long) * retinfo->size_of_ancillary); + *descr_per_ancillary = (long*) malloc (sizeof(long) * retinfo->size_of_ancillary); counter = 0; - for (i=0; i < retinfo->size_of_ancillary; i++) + for (i=0; i < retinfo->size_of_ancillary; i++){ counter += retinfo->descr_per_ancillary[i]; - + // Convert the int* to long* + levels[0][i] = (long) retinfo->levels[i]; + types[0][i] = (long) retinfo->types[i]; + descr_per_ancillary[0][i] = (long) retinfo->descr_per_ancillary[i]; + } *file_descr = (char*) malloc (sizeof(char) * counter); memset(*file_descr, 0, sizeof(char) * counter); counter = 0; @@ -688,13 +663,10 @@ counter += retinfo->descr_per_ancillary[i]; } - *flag = retinfo->flags; - //int k; - //char* dsadas; - //dsadas = (char*) (*file_descr[0]); - //for (k=0; kno_of_messages * sizeof(int); k++) - // printf("0x%X ", dsadas[k]); + // Set the retflag + *retflag = retinfo->retflag; + // Free the memory free(retinfo->address); free(retinfo->length_of_messages); free(retinfo->levels); @@ -721,17 +693,8 @@ free(retinfo->messages[0]); free(retinfo->messages); free(retinfo->address); + free(retinfo); free(controlbuf); - file_descr = NULL; - levels = NULL; - types = NULL; - descr_per_ancillary = NULL; - length_of_messages = NULL; - messages =NULL; - address = NULL; - addrlen = NULL; - no_of_messages = NULL; - size_of_ancillary = NULL; }else{ 
if (iov_alloc){ @@ -740,22 +703,14 @@ free(retinfo->messages); free(retinfo->address); free(controlbuf); - length_of_messages = NULL; - messages =NULL; - address = NULL; - file_descr = NULL; - levels = NULL; - types = NULL; - descr_per_ancillary = NULL; - addrlen = NULL; - no_of_messages = NULL; - size_of_ancillary = NULL; - + free(retinfo); } } return error_flag; err_closefds: + // Special case for UNIX sockets. In case file descriptors are received, they need to be closed. + // Taken from CPython #ifdef SCM_RIGHTS /* Close all descriptors coming from SCM_RIGHTS, so they don't leak. */ for (cmsgh = ((msg.msg_controllen > 0) ? CMSG_FIRSTHDR(&msg) : NULL); @@ -783,8 +738,8 @@ } - //################################################################################################ - //send goes from here + // ################################################################################################ + // Sendmsg implementation and associated functions #ifdef CMSG_LEN static int @@ -822,8 +777,28 @@ } #endif + /* + sendmsg_implementation is a wrapper over sendmsg of the API. + It was inspired from the way CPython did their implementation of this. + The main reason that it was written in C, is the struct msghdr, + which contains the ancillary data in a linked list of cmsghdr structures. + It was simpler to use it in C, and then push the simpler types of data via rffi. 
+ */ RPY_EXTERN - int sendmsg_implementation(int socket, struct sockaddr* address, socklen_t addrlen, long* length_of_messages, char** messages, int no_of_messages, long* levels, long* types, char** file_descriptors, long* no_of_fds, int control_length, int flag ) + int sendmsg_implementation + (int socket, + struct sockaddr* address, + socklen_t addrlen, + long* length_of_messages, + char** messages, + int no_of_messages, + long* levels, + long* types, + char** file_descriptors, + long* no_of_fds, + int control_length, + int flag + ) { struct msghdr msg = {0}; @@ -832,15 +807,16 @@ int retval; size_t i; + // Prepare the msghdr structure for the send: + // Add the address - if (address != NULL) { msg.msg_name = address; msg.msg_namelen = addrlen; } + // Add the message struct iovec *iovs = NULL; - if (no_of_messages > 0){ iovs = (struct iovec*) malloc(no_of_messages * sizeof(struct iovec)); @@ -853,8 +829,8 @@ iovs[i].iov_len = length_of_messages[i]; } } + // Add the ancillary - #ifndef CMSG_SPACE if (control_length > 1){ free(iovs); @@ -862,7 +838,9 @@ } #endif if (control_length > 0){ + //compute the total size of the ancillary + //getting the exact amount of space can be tricky and os dependent. size_t total_size_of_ancillary = 0; size_t space; size_t controllen = 0, controllen_last = 0; @@ -884,16 +862,14 @@ return ANC_DATA_TOO_LARGEX; } controllen_last = controllen; - } - controlbuf = malloc(controllen); //* sizeof(int) - + controlbuf = malloc(controllen); msg.msg_control= controlbuf; msg.msg_controllen = controllen; + // memset controlbuf to 0 to avoid trash in the ancillary memset(controlbuf, 0, controllen); - cmsg = NULL; for (i = 0; i< control_length; i++){ cmsg = (i == 0) ? 
CMSG_FIRSTHDR(&msg) : CMSG_NXTHDR(&msg, cmsg); @@ -912,6 +888,7 @@ // Send the data retval = sendmsg(socket, &msg, flag); + // free everything that was allocated here, and we would not need in rsocket if (iovs != NULL) free(iovs); if (controlbuf !=NULL) @@ -919,6 +896,14 @@ return retval; } + + // ################################################################################################ + // Wrappers for CMSG_SPACE and CMSG_LEN + + /* + These 2 functions are wrappers over sys/socket.h's CMSG_SPACE and CMSG_LEN. + They are identical to CPython's. + */ #ifdef CMSG_SPACE RPY_EXTERN size_t CMSG_SPACE_wrapper(size_t desired_space){ @@ -931,7 +916,6 @@ #endif #ifdef CMSG_LEN - RPY_EXTERN size_t CMSG_LEN_wrapper(size_t desired_len){ size_t result; @@ -942,14 +926,22 @@ } #endif + // ################################################################################################ + // Extra functions that I needed + + /* + This function is used to memcpy from a char* at an offset. + Could not get rffi.c_memcpy to do it at an offset, so I made my own. 
+ */ RPY_EXTERN - char* memcpy_from_CCHARP_at_offset_and_size(char* string, int offset, int size){ - char* buffer; - buffer = (char*)malloc(sizeof(char)*size); - buffer = memcpy(buffer, string + offset, size); - return buffer; + int memcpy_from_CCHARP_at_offset_and_size(char* stringfrom, char** stringto, int offset, int size){ + *stringto = memcpy(*stringto, stringfrom + offset, size); + return 0; } + /* + These functions free memory that was allocated in C (sendmsg or recvmsg) was used in rsocket and now needs cleanup + */ RPY_EXTERN int free_pointer_to_signedp(int** ptrtofree){ free(*ptrtofree); @@ -967,7 +959,7 @@ post_include_bits =[ "RPY_EXTERN " "int sendmsg_implementation(int socket, struct sockaddr* address, socklen_t addrlen, long* length_of_messages, char** messages, int no_of_messages, long* levels, long* types, char** file_descriptors, long* no_of_fds, int control_length, int flag );\n" "RPY_EXTERN " - "int recvmsg_implementation(int socket_fd, int message_size, int ancillary_size, int flags, struct sockaddr* address, socklen_t* addrlen, int** length_of_messages, char** messages, int* no_of_messages, int* size_of_ancillary, int** levels, int** types, char** file_descr, int** descr_per_ancillary, int* flag);\n" + "int recvmsg_implementation(int socket_fd, int message_size, int ancillary_size, int flags, struct sockaddr* address, socklen_t* addrlen, int** length_of_messages, char** messages, int* no_of_messages, int* size_of_ancillary, long** levels, long** types, char** file_descr, long** descr_per_ancillary, int* flag);\n" "static " "int cmsg_min_space(struct msghdr *msg, struct cmsghdr *cmsgh, size_t space);\n" "static " @@ -983,32 +975,13 @@ "RPY_EXTERN " "size_t CMSG_SPACE_wrapper(size_t desired_space);\n" "RPY_EXTERN " - "char* memcpy_from_CCHARP_at_offset_and_size(char* string, int offset, int size);\n" + "int memcpy_from_CCHARP_at_offset_and_size(char* stringfrom, char** stringto, int offset, int size);\n" "RPY_EXTERN " "int 
free_pointer_to_signedp(int** ptrtofree);\n" "RPY_EXTERN " "int free_ptr_to_charp(char** ptrtofree);\n" ] - #CConfig.SignedPP = lltype.Ptr(lltype.Array(rffi.SIGNEDP, hints={'nolength': True})) - - - # CConfig.recvmsginfo = platform.Struct('struct recvmsg_info', - # [('error_code',rffi.SIGNED), - # ('address',sockaddr_ptr), - # ('addrlen',socklen_t_ptr), - # ('length_of_messages', rffi.SIGNEDP), - # ('messages',rffi.CCHARPP), - # ('no_of_messages',rffi.INT), - # ('size_of_ancillary',rffi.INT), - # ('levels', rffi.SIGNEDP), - # ('types', rffi.SIGNEDP), - # ('file_descr', rffi.CCHARPP), - # ('descr_per_ancillary', rffi.SIGNEDP), - # ('flags', rffi.INT), - # ]) - - # compilation_info = ExternalCompilationInfo( includes=includes, @@ -1252,7 +1225,7 @@ compilation_info=compilation_info)) memcpy_from_CCHARP_at_offset = jit.dont_look_inside(rffi.llexternal("memcpy_from_CCHARP_at_offset_and_size", - [rffi.CCHARP,rffi.INT,rffi.INT],rffi.CCHARP,save_err=SAVE_ERR,compilation_info=compilation_info)) + [rffi.CCHARP, rffi.CCHARPP,rffi.INT,rffi.INT],rffi.INT,save_err=SAVE_ERR,compilation_info=compilation_info)) freeccharp = jit.dont_look_inside(rffi.llexternal("free_ptr_to_charp", [rffi.CCHARPP],rffi.INT,save_err=SAVE_ERR,compilation_info=compilation_info)) freesignedp = jit.dont_look_inside(rffi.llexternal("free_pointer_to_signedp", diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -932,6 +932,7 @@ address.addrlen = addrlen else: address = None + print address data = buf.str(read_bytes) return (data, address) raise self.error_handler() @@ -965,14 +966,20 @@ @jit.dont_look_inside def recvmsg(self, message_size, ancbufsize = 0, flags = 0): + """ + Receive up to message_size bytes from a message. Also receives ancillary data. + Returns the message, ancillary, flag and address of the sender. 
+ :param message_size: Maximum size of the message to be received + :param ancbufsize: Maximum size of the ancillary data to be received + :param flags: Receive flag. For more details, please check the Unix manual + :return: a tuple consisting of the message, the ancillary data, return flag and the address. + """ if message_size < 0: raise RSocketError("Invalid message size") if ancbufsize < 0: raise RSocketError("invalid ancillary data buffer length") - # addr, maxlen = make_null_address(self.family) - # addrlen_p = lltype.malloc(_c.socklen_t_ptr.TO, flavor='raw') - # addrlen_p[0] = rffi.cast(_c.socklen_t, maxlen) + self.wait_for_data(False) address, addr_p, addrlen_p = self._addrbuf() len_of_msgs = lltype.malloc(rffi.SIGNEDPP.TO,1,flavor='raw',track_allocation=True,nonmovable=False) messages = lltype.malloc(rffi.CCHARPP.TO,1,flavor='raw',track_allocation=True,nonmovable=False ) @@ -989,6 +996,7 @@ retflag = lltype.malloc(rffi.SIGNEDP.TO,1,flavor='raw',track_allocation=True,nonmovable=False ) retflag[0] = rffi.cast(rffi.SIGNED,0) + # a mask for the SIGNEDP's that need to be cast to int. 
(long default) LONG_MASK = 2**32 - 1 reply = _c.recvmsg(self.fd, rffi.cast(lltype.Signed,message_size), rffi.cast(lltype.Signed,ancbufsize),rffi.cast(lltype.Signed,flags), @@ -1008,55 +1016,74 @@ offset = 0 list_of_tuples = [] + + pre_anc = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw', track_allocation=True, nonmovable=False) for i in range(anc_size): - x = rffi.cast(rffi.SIGNED, levels[0][i]) - x &= LONG_MASK - level = x - x = rffi.cast(rffi.SIGNED,types[0][i]) - x &= LONG_MASK - type = x - x = rffi.cast(rffi.SIGNED,descr_per_anc[0][i]) - x &= LONG_MASK - bytes_in_anc = x - pre_anc = _c.memcpy_from_CCHARP_at_offset(file_descr[0],rffi.cast(rffi.SIGNED,offset), bytes_in_anc) - anc = rffi.charpsize2str(pre_anc,bytes_in_anc) + level = rffi.cast(rffi.SIGNED, levels[0][i]) + type = rffi.cast(rffi.SIGNED, types[0][i]) + bytes_in_anc = rffi.cast(rffi.SIGNED, descr_per_anc[0][i]) + pre_anc[0] = lltype.malloc(rffi.CCHARP.TO, bytes_in_anc,flavor='raw',track_allocation=True,nonmovable=False) + _c.memcpy_from_CCHARP_at_offset(file_descr[0], pre_anc,rffi.cast(rffi.SIGNED,offset), bytes_in_anc) + anc = rffi.charpsize2str(pre_anc[0],bytes_in_anc) tup = (level,type, anc) list_of_tuples.append(tup) offset += bytes_in_anc - #lltype.free(pre_anc, flavor='raw') - #address.unlock() + lltype.free(pre_anc[0], flavor='raw') + if addrlen: address.addrlen = addrlen else: + address.unlock() address = None - rettup = (retmsg,list_of_tuples,returnflag,address) - #free underlying complexity first if address is not None: address.unlock() - # lltype.free(messages[0],flavor='raw') + # free underlying complexity first _c.freeccharp(file_descr) _c.freesignedp(len_of_msgs) _c.freesignedp(levels) _c.freesignedp(types) _c.freesignedp(descr_per_anc) + lltype.free(messages[0], flavor='raw') + lltype.free(pre_anc,flavor='raw') lltype.free(messages,flavor='raw') lltype.free(file_descr,flavor='raw') lltype.free(len_of_msgs,flavor='raw') lltype.free(no_of_messages, flavor='raw') lltype.free(size_of_anc, 
flavor='raw') lltype.free(levels, flavor='raw') + lltype.free(types, flavor='raw') lltype.free(descr_per_anc, flavor='raw') lltype.free(retflag, flavor='raw') lltype.free(addrlen_p,flavor='raw') return rettup else: + + #in case of failure the underlying complexity has already been freed + lltype.free(messages[0], flavor='raw') + lltype.free(messages, flavor='raw') + lltype.free(file_descr, flavor='raw') + lltype.free(len_of_msgs, flavor='raw') + lltype.free(no_of_messages, flavor='raw') + lltype.free(size_of_anc, flavor='raw') + lltype.free(levels, flavor='raw') + lltype.free(types, flavor='raw') + lltype.free(descr_per_anc, flavor='raw') + lltype.free(retflag, flavor='raw') + lltype.free(addrlen_p, flavor='raw') + if address is not None: address.unlock() + if (reply == -10000): + raise RSocketError("Invalid message size") + if (reply == -10001): + raise RSocketError("Invalid ancillary data buffer length") + if (reply == -10002): + raise RSocketError("received malformed or improperly truncated ancillary data") raise last_error() @@ -1109,8 +1136,16 @@ @jit.dont_look_inside def sendmsg(self, messages, ancillary=None, flags=0, address=None): - # addr = address.lock() - # addrlen = address.addrlen + """ + Send data and ancillary on a socket. For use of ancillary data, please check the Unix manual. + Work on connectionless sockets via the address parameter. + :param messages: a message that is a list of strings + :param ancillary: data to be sent separate from the message body. Needs to be a list of tuples. + E.g. [(level,type, bytes),...]. Default None. + :param flags: the flag to be set for sendmsg. Please check the Unix manual regarding values. Default 0 + :param address: address of the recepient. Useful for when sending on connectionless sockets. 
Default None + :return: Bytes sent from the message + """ need_to_free_address = True if address is None: need_to_free_address = False @@ -1127,7 +1162,6 @@ for message in messages: messages_ptr[counter] = rffi.str2charp(message) messages_length_ptr[counter] = rffi.cast(rffi.SIGNED, len(message)) - #messages_length_ptr[counter] = rffi.cast(rffi.SIGNED, 0x00cabc00abcabc00) counter += 1 messages_ptr[counter] = lltype.nullptr(rffi.CCHARP.TO) if ancillary is not None: @@ -1146,13 +1180,14 @@ levels[counter] = rffi.cast(rffi.SIGNED,level) types[counter] = rffi.cast(rffi.SIGNED,type) desc_per_ancillary[counter] = rffi.cast(rffi.SIGNED, (len(content))) - #file_descr[counter] = lltype.malloc(rffi.CCHARP.TO,len(content),flavor='raw',zero=True, track_allocation=True,nonmovable=False) file_descr[counter] = rffi.str2charp(content, track_allocation=True) counter +=1 else: size_of_ancillary = 0 snd_no_msgs = rffi.cast(rffi.SIGNED, no_of_messages) snd_anc_size =rffi.cast(rffi.SIGNED, size_of_ancillary) + + bytes_sent = _c.sendmsg(self.fd, addr, addrlen, messages_length_ptr, messages_ptr, snd_no_msgs,levels,types,file_descr,desc_per_ancillary,snd_anc_size,flags) @@ -1171,6 +1206,7 @@ lltype.free(levels, flavor='raw', track_allocation=True) lltype.free(file_descr, flavor='raw', track_allocation=True) + self.wait_for_data(True) if (bytes_sent < 0) and (bytes_sent!=-1000) and (bytes_sent!=-1001) and (bytes_sent!=-1002): raise last_error() @@ -1361,12 +1397,24 @@ if _c._POSIX: def CMSG_LEN( demanded_len): + """ + Socket method to determine the optimal byte size of the ancillary. + Recommended to be used when computing the ancillary size for recvmsg. + :param demanded_len: an integer with the minimum size required. + :return: an integer with the minimum memory needed for the required size. 
The value is not memory alligned + """ if demanded_len < 0: return 0 result = _c.CMSG_LEN(demanded_len) return result def CMSG_SPACE( demanded_size): + """ + Socket method to determine the optimal byte size of the ancillary. + Recommended to be used when computing the ancillary size for recvmsg. + :param demanded_size: an integer with the minimum size required. + :return: an integer with the minimum memory needed for the required size. The value is memory alligned + """ if demanded_size < 0: return 0 result = _c.CMSG_SPACE(demanded_size) From pypy.commits at gmail.com Thu Jul 27 08:41:53 2017 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 27 Jul 2017 05:41:53 -0700 (PDT) Subject: [pypy-commit] pypy default: document that methods also use the special-casing of is and id Message-ID: <5979df91.d2d31c0a.75ecd.e26b@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r91974:bff059fd0c08 Date: 2017-07-27 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/bff059fd0c08/ Log: document that methods also use the special-casing of is and id diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -330,6 +330,8 @@ - ``frozenset`` (empty frozenset only) + - unbound and bound method objects + This change requires some changes to ``id`` as well. ``id`` fulfills the following condition: ``x is y <=> id(x) == id(y)``. 
Therefore ``id`` of the above types will return a value that is computed from the argument, and can From pypy.commits at gmail.com Thu Jul 27 08:53:32 2017 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 27 Jul 2017 05:53:32 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: fix syntax error Message-ID: <5979e24c.10841c0a.b3c56.46e3@mx.google.com> Author: Carl Friedrich Bolz Branch: py3.5 Changeset: r91975:ad2a4cd1e7a7 Date: 2017-07-27 14:51 +0200 http://bitbucket.org/pypy/pypy/changeset/ad2a4cd1e7a7/ Log: fix syntax error (bad armin, no cookie) diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -411,7 +411,7 @@ val = lib.mvwget_wch(self._win, *args, wch) else: raise error("get_wch requires 0 or 2 arguments") - _check_ERR(val, "get_wch"): + _check_ERR(val, "get_wch") return wch[0] def getkey(self, *args): From pypy.commits at gmail.com Thu Jul 27 09:48:57 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 27 Jul 2017 06:48:57 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2619 Message-ID: <5979ef49.87da1c0a.c5b30.ac6c@mx.google.com> Author: Armin Rigo Branch: Changeset: r91976:e71aec0042dd Date: 2017-07-27 15:48 +0200 http://bitbucket.org/pypy/pypy/changeset/e71aec0042dd/ Log: Issue #2619 Experimental: remove the special case for the identity of *bound* method objects. This depends on CPython, where it seems that no strange internal type exists where the equivalent of ``x.method is x.method`` would return True. (This is unlike unbound methods, where e.g. ``list.append is list.append`` returns True; this is why the special case remains for *unbound* method objects.) diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -330,7 +330,7 @@ - ``frozenset`` (empty frozenset only) - - unbound and bound method objects + - unbound method objects This change requires some changes to ``id`` as well. 
``id`` fulfills the following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -559,21 +559,29 @@ return space.newbool(space.eq_w(self.w_function, w_other.w_function)) def is_w(self, space, other): + if self.w_instance is not None: + return W_Root.is_w(self, space, other) + # The following special-case is only for *unbound* method objects. + # Motivation: in CPython, it seems that no strange internal type + # exists where the equivalent of ``x.method is x.method`` would + # return True. This is unlike unbound methods, where e.g. + # ``list.append is list.append`` returns True. The following code + # is here to emulate that behaviour. Unlike CPython, we return + # True for all equal unbound methods, not just for built-in types. if not isinstance(other, Method): return False - return (self.w_instance is other.w_instance and + return (other.w_instance is None and self.w_function is other.w_function and self.w_class is other.w_class) def immutable_unique_id(self, space): - from pypy.objspace.std.util import IDTAG_METHOD as tag + if self.w_instance is not None: + return W_Root.immutable_unique_id(self, space) + # the special-case is only for *unbound* method objects + # + from pypy.objspace.std.util import IDTAG_UNBOUND_METHOD as tag from pypy.objspace.std.util import IDTAG_SHIFT - if self.w_instance is not None: - id = space.bigint_w(space.id(self.w_instance)) - id = id.lshift(LONG_BIT) - else: - id = rbigint.fromint(0) - id = id.or_(space.bigint_w(space.id(self.w_function))) + id = space.bigint_w(space.id(self.w_function)) id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) id = id.lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(id) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ 
b/pypy/interpreter/test/test_function.py @@ -1,4 +1,4 @@ -import pytest +import pytest, sys from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -342,6 +342,11 @@ raises(ValueError, type(f).__setstate__, f, (1, 2, 3)) class AppTestMethod: + def setup_class(cls): + cls.w_runappdirect_on_cpython = cls.space.wrap( + cls.runappdirect and + '__pypy__' not in sys.builtin_module_names) + def test_simple_call(self): class A(object): def func(self, arg2): @@ -572,7 +577,6 @@ assert meth == meth assert meth == MethodType(func, object) - @pytest.mark.skipif("config.option.runappdirect") def test_method_identity(self): class A(object): def m(self): @@ -589,19 +593,24 @@ a = A() a2 = A() - assert a.m is a.m - assert id(a.m) == id(a.m) - assert a.m is not a.n - assert id(a.m) != id(a.n) - assert a.m is not a2.m - assert id(a.m) != id(a2.m) + x = a.m; y = a.m + assert x is not y + assert id(x) != id(y) + assert x == y + assert x is not a.n + assert id(x) != id(a.n) + assert x is not a2.m + assert id(x) != id(a2.m) - assert A.m is A.m - assert id(A.m) == id(A.m) - assert A.m is not A.n - assert id(A.m) != id(A.n) - assert A.m is not B.m - assert id(A.m) != id(B.m) + if not self.runappdirect_on_cpython: + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m == A.m + x = A.m + assert x is not A.n + assert id(x) != id(A.n) + assert x is not B.m + assert id(x) != id(B.m) class TestMethod: diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -8,7 +8,7 @@ IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 -IDTAG_METHOD = 9 +IDTAG_UNBOUND_METHOD = 9 IDTAG_SPECIAL = 11 # -1 - (-maxunicode-1): unichar # 0 - 255: char # 256: empty string From pypy.commits at gmail.com Thu Jul 27 11:17:31 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 27 Jul 2017 08:17:31 -0700 (PDT) Subject: [pypy-commit] pypy 
py3.5: Manual merge of c9e1134edc4a Message-ID: <597a040b.4aa8df0a.13b04.0b86@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91978:10415d8c98cb Date: 2017-07-27 17:06 +0200 http://bitbucket.org/pypy/pypy/changeset/10415d8c98cb/ Log: Manual merge of c9e1134edc4a diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -16,6 +16,8 @@ allow_surrogate_by_default = True BYTEORDER = sys.byteorder +BYTEORDER2 = BYTEORDER[0] + 'e' # either "le" or "be" +assert BYTEORDER2 in ('le', 'be') # python 2.7 has a preview of py3k behavior, so those functions # are used either when we're testing wide pypy on narrow cpython @@ -486,9 +488,31 @@ errorhandler, "little") return result, length +def py3k_str_decode_utf_16(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_16_helper(s, size, errors, final, + errorhandler, "native", + 'utf-16-' + BYTEORDER2) + return result, length + +def py3k_str_decode_utf_16_be(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_16_helper(s, size, errors, final, + errorhandler, "big", + 'utf-16-be') + return result, length + +def py3k_str_decode_utf_16_le(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_16_helper(s, size, errors, final, + errorhandler, "little", + 'utf-16-le') + return result, length + def str_decode_utf_16_helper(s, size, errors, final=True, errorhandler=None, - byteorder="native"): + byteorder="native", + public_encoding_name='utf16'): if errorhandler is None: errorhandler = default_unicode_error_decode bo = 0 @@ -546,7 +570,8 @@ if len(s) - pos < 2: if not final: break - r, pos = errorhandler(errors, 'utf16', "truncated data", + r, pos = errorhandler(errors, public_encoding_name, + "truncated data", s, pos, len(s)) result.append(r) if len(s) - pos < 2: @@ -562,7 +587,8 @@ if not final: break errmsg = "unexpected end 
of data" - r, pos = errorhandler(errors, 'utf16', errmsg, s, pos, len(s)) + r, pos = errorhandler(errors, public_encoding_name, + errmsg, s, pos, len(s)) result.append(r) if len(s) - pos < 2: break @@ -578,12 +604,12 @@ (ch2 & 0x3FF)) + 0x10000)) continue else: - r, pos = errorhandler(errors, 'utf16', + r, pos = errorhandler(errors, public_encoding_name, "illegal UTF-16 surrogate", s, pos - 4, pos - 2) result.append(r) else: - r, pos = errorhandler(errors, 'utf16', + r, pos = errorhandler(errors, public_encoding_name, "illegal encoding", s, pos - 2, pos) result.append(r) @@ -592,7 +618,8 @@ def unicode_encode_utf_16_helper(s, size, errors, errorhandler=None, allow_surrogates=True, - byteorder='little'): + byteorder='little', + public_encoding_name='utf16'): if errorhandler is None: errorhandler = default_unicode_error_encode if size == 0: @@ -620,13 +647,13 @@ elif ch >= 0xE000 or allow_surrogates: _STORECHAR(result, ch, byteorder) else: - ru, rs, pos = errorhandler(errors, 'utf16', + ru, rs, pos = errorhandler(errors, public_encoding_name, 'surrogates not allowed', s, pos-1, pos) if rs is not None: # py3k only if len(rs) % 2 != 0: - errorhandler('strict', 'utf16', + errorhandler('strict', public_encoding_name, 'surrogates not allowed', s, pos-1, pos) result.append(rs) @@ -635,7 +662,7 @@ if ord(ch) < 0xD800: _STORECHAR(result, ord(ch), byteorder) else: - errorhandler('strict', 'utf16', + errorhandler('strict', public_encoding_name, 'surrogates not allowed', s, pos-1, pos) continue @@ -648,20 +675,39 @@ return unicode_encode_utf_16_helper(s, size, errors, errorhandler, allow_surrogates, "native") - def unicode_encode_utf_16_be(s, size, errors, errorhandler=None, allow_surrogates=True): return unicode_encode_utf_16_helper(s, size, errors, errorhandler, allow_surrogates, "big") - def unicode_encode_utf_16_le(s, size, errors, errorhandler=None, allow_surrogates=True): return unicode_encode_utf_16_helper(s, size, errors, errorhandler, allow_surrogates, "little") +def 
py3k_unicode_encode_utf_16(s, size, errors, + errorhandler=None, + allow_surrogates=True): + return unicode_encode_utf_16_helper(s, size, errors, errorhandler, + allow_surrogates, "native", + 'utf-16-' + BYTEORDER2) + +def py3k_unicode_encode_utf_16_be(s, size, errors, + errorhandler=None, + allow_surrogates=True): + return unicode_encode_utf_16_helper(s, size, errors, errorhandler, + allow_surrogates, "big", + 'utf-16-be') + +def py3k_unicode_encode_utf_16_le(s, size, errors, + errorhandler=None, + allow_surrogates=True): + return unicode_encode_utf_16_helper(s, size, errors, errorhandler, + allow_surrogates, "little", + 'utf-16-le') + # ____________________________________________________________ # utf-32 @@ -684,12 +730,34 @@ errorhandler, "little") return result, length +def py3k_str_decode_utf_32(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final, + errorhandler, "native", + 'utf-32-' + BYTEORDER2) + return result, length + +def py3k_str_decode_utf_32_be(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final, + errorhandler, "big", + 'utf-32-be') + return result, length + +def py3k_str_decode_utf_32_le(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final, + errorhandler, "little", + 'utf-32-le') + return result, length + BOM32_DIRECT = intmask(0x0000FEFF) BOM32_REVERSE = intmask(0xFFFE0000) def str_decode_utf_32_helper(s, size, errors, final=True, errorhandler=None, - byteorder="native"): + byteorder="native", + public_encoding_name='utf32'): if errorhandler is None: errorhandler = default_unicode_error_decode bo = 0 @@ -744,7 +812,8 @@ if len(s) - pos < 4: if not final: break - r, pos = errorhandler(errors, 'utf32', "truncated data", + r, pos = errorhandler(errors, public_encoding_name, + "truncated data", s, pos, len(s)) 
result.append(r) if len(s) - pos < 4: @@ -753,7 +822,8 @@ ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) if ch >= 0x110000: - r, pos = errorhandler(errors, 'utf32', "codepoint not in range(0x110000)", + r, pos = errorhandler(errors, public_encoding_name, + "codepoint not in range(0x110000)", s, pos, len(s)) result.append(r) continue @@ -786,7 +856,8 @@ def unicode_encode_utf_32_helper(s, size, errors, errorhandler=None, allow_surrogates=True, - byteorder='little'): + byteorder='little', + public_encoding_name='utf32'): if errorhandler is None: errorhandler = default_unicode_error_encode if size == 0: @@ -808,13 +879,13 @@ ch2 = 0 if 0xD800 <= ch < 0xDC00: if not allow_surrogates: - ru, rs, pos = errorhandler(errors, 'utf32', + ru, rs, pos = errorhandler(errors, public_encoding_name, 'surrogates not allowed', s, pos-1, pos) if rs is not None: # py3k only if len(rs) % 4 != 0: - errorhandler('strict', 'utf32', + errorhandler('strict', public_encoding_name, 'surrogates not allowed', s, pos-1, pos) result.append(rs) @@ -823,7 +894,7 @@ if ord(ch) < 0xD800: _STORECHAR32(result, ord(ch), byteorder) else: - errorhandler('strict', 'utf32', + errorhandler('strict', public_encoding_name, 'surrogates not allowed', s, pos-1, pos) continue @@ -841,18 +912,34 @@ return unicode_encode_utf_32_helper(s, size, errors, errorhandler, allow_surrogates, "native") - def unicode_encode_utf_32_be(s, size, errors, errorhandler=None, allow_surrogates=True): return unicode_encode_utf_32_helper(s, size, errors, errorhandler, allow_surrogates, "big") - def unicode_encode_utf_32_le(s, size, errors, errorhandler=None, allow_surrogates=True): return unicode_encode_utf_32_helper(s, size, errors, errorhandler, allow_surrogates, "little") +def py3k_unicode_encode_utf_32(s, size, errors, + errorhandler=None, allow_surrogates=True): + return unicode_encode_utf_32_helper(s, size, errors, errorhandler, + allow_surrogates, 
"native", + 'utf-32-' + BYTEORDER2) + +def py3k_unicode_encode_utf_32_be(s, size, errors, + errorhandler=None, allow_surrogates=True): + return unicode_encode_utf_32_helper(s, size, errors, errorhandler, + allow_surrogates, "big", + 'utf-32-be') + +def py3k_unicode_encode_utf_32_le(s, size, errors, + errorhandler=None, allow_surrogates=True): + return unicode_encode_utf_32_helper(s, size, errors, errorhandler, + allow_surrogates, "little", + 'utf-32-le') + # ____________________________________________________________ # utf-7 diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -38,8 +38,10 @@ assert x == y assert type(x) is type(y) - def getdecoder(self, encoding): - return getattr(runicode, "str_decode_%s" % encoding.replace("-", "_")) + def getdecoder(self, encoding, look_for_py3k=False): + prefix = "py3k_" if look_for_py3k else "" + return getattr(runicode, "%sstr_decode_%s" % + (prefix, encoding.replace("-", "_"))) def getencoder(self, encoding): return getattr(runicode, @@ -96,14 +98,17 @@ assert '\xc3' in result def checkdecodeerror(self, s, encoding, start, stop, - addstuff=True, msg=None): + addstuff=True, msg=None, + expected_reported_encoding=None, + look_for_py3k=False): called = [0] def errorhandler(errors, enc, errmsg, t, startingpos, endingpos): called[0] += 1 if called[0] == 1: assert errors == "foo!" 
- assert enc == encoding.replace('-', '') + assert enc == (expected_reported_encoding or + encoding.replace('-', '')) assert t is s assert start == startingpos assert stop == endingpos @@ -111,7 +116,7 @@ assert errmsg == msg return u"42424242", stop return u"", endingpos - decoder = self.getdecoder(encoding) + decoder = self.getdecoder(encoding, look_for_py3k=look_for_py3k) if addstuff: s += "some rest in ascii" result, _ = decoder(s, len(s), "foo!", True, errorhandler) @@ -218,6 +223,27 @@ ]: self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) + def test_utf16_errors_py3k(self): + letter = sys.byteorder[0] + self.checkdecodeerror("\xff", "utf-16", 0, 1, addstuff=False, + expected_reported_encoding='utf-16-%se' % letter, + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-16-be", 0, 1, addstuff=False, + expected_reported_encoding='utf-16-be', + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-16-le", 0, 1, addstuff=False, + expected_reported_encoding='utf-16-le', + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-32", 0, 1, addstuff=False, + expected_reported_encoding='utf-32-%se' % letter, + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-32-be", 0, 1, addstuff=False, + expected_reported_encoding='utf-32-be', + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-32-le", 0, 1, addstuff=False, + expected_reported_encoding='utf-32-le', + look_for_py3k=True) + def test_utf16_bugs(self): s = '\x80-\xe9\xdeL\xa3\x9b' py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, From pypy.commits at gmail.com Thu Jul 27 11:17:33 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 27 Jul 2017 08:17:33 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Fix for issue #2618 Message-ID: <597a040d.c1a41c0a.abb0a.db7b@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91979:35d75cdc5dd0 Date: 2017-07-27 17:15 +0200 http://bitbucket.org/pypy/pypy/changeset/35d75cdc5dd0/ Log: Fix for issue #2618 diff --git 
a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -618,6 +618,8 @@ def make_encoder_wrapper(name): rname = "unicode_encode_%s" % (name.replace("_encode", ""), ) assert hasattr(runicode, rname) + if hasattr(runicode, 'py3k_' + rname): + rname = 'py3k_' + rname @unwrap_spec(uni=unicode, errors='text_or_none') def wrap_encoder(space, uni, errors="strict"): if errors is None: @@ -632,6 +634,8 @@ def make_utf_encoder_wrapper(name): rname = "unicode_encode_%s" % (name.replace("_encode", ""), ) assert hasattr(runicode, rname) + if hasattr(runicode, 'py3k_' + rname): + rname = 'py3k_' + rname @unwrap_spec(uni=unicode, errors='text_or_none') def wrap_encoder(space, uni, errors="strict"): if errors is None: @@ -647,6 +651,8 @@ def make_decoder_wrapper(name): rname = "str_decode_%s" % (name.replace("_decode", ""), ) assert hasattr(runicode, rname) + if hasattr(runicode, 'py3k_' + rname): + rname = 'py3k_' + rname @unwrap_spec(string='bufferstr', errors='text_or_none', w_final=WrappedDefault(False)) def wrap_decoder(space, string, errors="strict", w_final=None): diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -623,6 +623,9 @@ "surrogatepass") raises(UnicodeDecodeError, b"abc\xed\xa0z".decode, "utf-8", "surrogatepass") + assert u'\ud8ae'.encode('utf_16_be', 'surrogatepass') == b'\xd8\xae' + assert (u'\U0000d8ae'.encode('utf-32-be', 'surrogatepass') == + b'\x00\x00\xd8\xae') def test_badandgoodsurrogatepassexceptions(self): import codecs From pypy.commits at gmail.com Thu Jul 27 11:17:29 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 27 Jul 2017 08:17:29 -0700 (PDT) Subject: [pypy-commit] pypy default: Support for py3k's more precise encoding, as reported to the error handler, Message-ID: 
<597a0409.4aa8df0a.13b04.0b84@mx.google.com> Author: Armin Rigo Branch: Changeset: r91977:c9e1134edc4a Date: 2017-07-27 17:05 +0200 http://bitbucket.org/pypy/pypy/changeset/c9e1134edc4a/ Log: Support for py3k's more precise encoding, as reported to the error handler, when using utf-16 or utf-32 diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -16,6 +16,8 @@ allow_surrogate_by_default = True BYTEORDER = sys.byteorder +BYTEORDER2 = BYTEORDER[0] + 'e' # either "le" or "be" +assert BYTEORDER2 in ('le', 'be') # python 2.7 has a preview of py3k behavior, so those functions # are used either when we're testing wide pypy on narrow cpython @@ -486,9 +488,31 @@ errorhandler, "little") return result, length +def py3k_str_decode_utf_16(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_16_helper(s, size, errors, final, + errorhandler, "native", + 'utf-16-' + BYTEORDER2) + return result, length + +def py3k_str_decode_utf_16_be(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_16_helper(s, size, errors, final, + errorhandler, "big", + 'utf-16-be') + return result, length + +def py3k_str_decode_utf_16_le(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_16_helper(s, size, errors, final, + errorhandler, "little", + 'utf-16-le') + return result, length + def str_decode_utf_16_helper(s, size, errors, final=True, errorhandler=None, - byteorder="native"): + byteorder="native", + public_encoding_name='utf16'): if errorhandler is None: errorhandler = default_unicode_error_decode bo = 0 @@ -546,7 +570,8 @@ if len(s) - pos < 2: if not final: break - r, pos = errorhandler(errors, 'utf16', "truncated data", + r, pos = errorhandler(errors, public_encoding_name, + "truncated data", s, pos, len(s)) result.append(r) if len(s) - pos < 2: @@ -562,7 +587,8 @@ if not final: break 
errmsg = "unexpected end of data" - r, pos = errorhandler(errors, 'utf16', errmsg, s, pos, len(s)) + r, pos = errorhandler(errors, public_encoding_name, + errmsg, s, pos, len(s)) result.append(r) if len(s) - pos < 2: break @@ -578,12 +604,12 @@ (ch2 & 0x3FF)) + 0x10000)) continue else: - r, pos = errorhandler(errors, 'utf16', + r, pos = errorhandler(errors, public_encoding_name, "illegal UTF-16 surrogate", s, pos - 4, pos - 2) result.append(r) else: - r, pos = errorhandler(errors, 'utf16', + r, pos = errorhandler(errors, public_encoding_name, "illegal encoding", s, pos - 2, pos) result.append(r) @@ -592,7 +618,8 @@ def unicode_encode_utf_16_helper(s, size, errors, errorhandler=None, allow_surrogates=True, - byteorder='little'): + byteorder='little', + public_encoding_name='utf16'): if errorhandler is None: errorhandler = default_unicode_error_encode if size == 0: @@ -620,13 +647,13 @@ elif ch >= 0xE000 or allow_surrogates: _STORECHAR(result, ch, byteorder) else: - ru, rs, pos = errorhandler(errors, 'utf16', + ru, rs, pos = errorhandler(errors, public_encoding_name, 'surrogates not allowed', s, pos-1, pos) if rs is not None: # py3k only if len(rs) % 2 != 0: - errorhandler('strict', 'utf16', + errorhandler('strict', public_encoding_name, 'surrogates not allowed', s, pos-1, pos) result.append(rs) @@ -635,7 +662,7 @@ if ord(ch) < 0xD800: _STORECHAR(result, ord(ch), byteorder) else: - errorhandler('strict', 'utf16', + errorhandler('strict', public_encoding_name, 'surrogates not allowed', s, pos-1, pos) continue @@ -648,20 +675,39 @@ return unicode_encode_utf_16_helper(s, size, errors, errorhandler, allow_surrogates, "native") - def unicode_encode_utf_16_be(s, size, errors, errorhandler=None, allow_surrogates=True): return unicode_encode_utf_16_helper(s, size, errors, errorhandler, allow_surrogates, "big") - def unicode_encode_utf_16_le(s, size, errors, errorhandler=None, allow_surrogates=True): return unicode_encode_utf_16_helper(s, size, errors, errorhandler, 
allow_surrogates, "little") +def py3k_unicode_encode_utf_16(s, size, errors, + errorhandler=None, + allow_surrogates=True): + return unicode_encode_utf_16_helper(s, size, errors, errorhandler, + allow_surrogates, "native", + 'utf-16-' + BYTEORDER2) + +def py3k_unicode_encode_utf_16_be(s, size, errors, + errorhandler=None, + allow_surrogates=True): + return unicode_encode_utf_16_helper(s, size, errors, errorhandler, + allow_surrogates, "big", + 'utf-16-be') + +def py3k_unicode_encode_utf_16_le(s, size, errors, + errorhandler=None, + allow_surrogates=True): + return unicode_encode_utf_16_helper(s, size, errors, errorhandler, + allow_surrogates, "little", + 'utf-16-le') + # ____________________________________________________________ # utf-32 @@ -684,12 +730,34 @@ errorhandler, "little") return result, length +def py3k_str_decode_utf_32(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final, + errorhandler, "native", + 'utf-32-' + BYTEORDER2) + return result, length + +def py3k_str_decode_utf_32_be(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final, + errorhandler, "big", + 'utf-32-be') + return result, length + +def py3k_str_decode_utf_32_le(s, size, errors, final=True, + errorhandler=None): + result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final, + errorhandler, "little", + 'utf-32-le') + return result, length + BOM32_DIRECT = intmask(0x0000FEFF) BOM32_REVERSE = intmask(0xFFFE0000) def str_decode_utf_32_helper(s, size, errors, final=True, errorhandler=None, - byteorder="native"): + byteorder="native", + public_encoding_name='utf32'): if errorhandler is None: errorhandler = default_unicode_error_decode bo = 0 @@ -744,7 +812,8 @@ if len(s) - pos < 4: if not final: break - r, pos = errorhandler(errors, 'utf32', "truncated data", + r, pos = errorhandler(errors, public_encoding_name, + "truncated 
data", s, pos, len(s)) result.append(r) if len(s) - pos < 4: @@ -753,7 +822,8 @@ ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) if ch >= 0x110000: - r, pos = errorhandler(errors, 'utf32', "codepoint not in range(0x110000)", + r, pos = errorhandler(errors, public_encoding_name, + "codepoint not in range(0x110000)", s, pos, len(s)) result.append(r) continue @@ -786,7 +856,8 @@ def unicode_encode_utf_32_helper(s, size, errors, errorhandler=None, allow_surrogates=True, - byteorder='little'): + byteorder='little', + public_encoding_name='utf32'): if errorhandler is None: errorhandler = default_unicode_error_encode if size == 0: @@ -808,13 +879,13 @@ ch2 = 0 if 0xD800 <= ch < 0xDC00: if not allow_surrogates: - ru, rs, pos = errorhandler(errors, 'utf32', + ru, rs, pos = errorhandler(errors, public_encoding_name, 'surrogates not allowed', s, pos-1, pos) if rs is not None: # py3k only if len(rs) % 4 != 0: - errorhandler('strict', 'utf32', + errorhandler('strict', public_encoding_name, 'surrogates not allowed', s, pos-1, pos) result.append(rs) @@ -823,7 +894,7 @@ if ord(ch) < 0xD800: _STORECHAR32(result, ord(ch), byteorder) else: - errorhandler('strict', 'utf32', + errorhandler('strict', public_encoding_name, 'surrogates not allowed', s, pos-1, pos) continue @@ -841,18 +912,34 @@ return unicode_encode_utf_32_helper(s, size, errors, errorhandler, allow_surrogates, "native") - def unicode_encode_utf_32_be(s, size, errors, errorhandler=None, allow_surrogates=True): return unicode_encode_utf_32_helper(s, size, errors, errorhandler, allow_surrogates, "big") - def unicode_encode_utf_32_le(s, size, errors, errorhandler=None, allow_surrogates=True): return unicode_encode_utf_32_helper(s, size, errors, errorhandler, allow_surrogates, "little") +def py3k_unicode_encode_utf_32(s, size, errors, + errorhandler=None, allow_surrogates=True): + return unicode_encode_utf_32_helper(s, size, errors, 
errorhandler, + allow_surrogates, "native", + 'utf-32-' + BYTEORDER2) + +def py3k_unicode_encode_utf_32_be(s, size, errors, + errorhandler=None, allow_surrogates=True): + return unicode_encode_utf_32_helper(s, size, errors, errorhandler, + allow_surrogates, "big", + 'utf-32-be') + +def py3k_unicode_encode_utf_32_le(s, size, errors, + errorhandler=None, allow_surrogates=True): + return unicode_encode_utf_32_helper(s, size, errors, errorhandler, + allow_surrogates, "little", + 'utf-32-le') + # ____________________________________________________________ # utf-7 diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -38,8 +38,10 @@ assert x == y assert type(x) is type(y) - def getdecoder(self, encoding): - return getattr(runicode, "str_decode_%s" % encoding.replace("-", "_")) + def getdecoder(self, encoding, look_for_py3k=False): + prefix = "py3k_" if look_for_py3k else "" + return getattr(runicode, "%sstr_decode_%s" % + (prefix, encoding.replace("-", "_"))) def getencoder(self, encoding): return getattr(runicode, @@ -96,14 +98,17 @@ assert '\xc3' in result def checkdecodeerror(self, s, encoding, start, stop, - addstuff=True, msg=None): + addstuff=True, msg=None, + expected_reported_encoding=None, + look_for_py3k=False): called = [0] def errorhandler(errors, enc, errmsg, t, startingpos, endingpos): called[0] += 1 if called[0] == 1: assert errors == "foo!" 
- assert enc == encoding.replace('-', '') + assert enc == (expected_reported_encoding or + encoding.replace('-', '')) assert t is s assert start == startingpos assert stop == endingpos @@ -111,7 +116,7 @@ assert errmsg == msg return u"42424242", stop return u"", endingpos - decoder = self.getdecoder(encoding) + decoder = self.getdecoder(encoding, look_for_py3k=look_for_py3k) if addstuff: s += "some rest in ascii" result, _ = decoder(s, len(s), "foo!", True, errorhandler) @@ -218,6 +223,27 @@ ]: self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) + def test_utf16_errors_py3k(self): + letter = sys.byteorder[0] + self.checkdecodeerror("\xff", "utf-16", 0, 1, addstuff=False, + expected_reported_encoding='utf-16-%se' % letter, + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-16-be", 0, 1, addstuff=False, + expected_reported_encoding='utf-16-be', + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-16-le", 0, 1, addstuff=False, + expected_reported_encoding='utf-16-le', + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-32", 0, 1, addstuff=False, + expected_reported_encoding='utf-32-%se' % letter, + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-32-be", 0, 1, addstuff=False, + expected_reported_encoding='utf-32-be', + look_for_py3k=True) + self.checkdecodeerror("\xff", "utf-32-le", 0, 1, addstuff=False, + expected_reported_encoding='utf-32-le', + look_for_py3k=True) + def test_utf16_bugs(self): s = '\x80-\xe9\xdeL\xa3\x9b' py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, From pypy.commits at gmail.com Thu Jul 27 11:31:43 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 27 Jul 2017 08:31:43 -0700 (PDT) Subject: [pypy-commit] pypy default: Mention that unbound method objects only exist in Python 2 Message-ID: <597a075f.c90b1c0a.7a3f3.994b@mx.google.com> Author: Armin Rigo Branch: Changeset: r91981:df23f3d1554c Date: 2017-07-27 17:25 +0200 http://bitbucket.org/pypy/pypy/changeset/df23f3d1554c/ Log: Mention that 
unbound method objects only exist in Python 2 diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -330,7 +330,7 @@ - ``frozenset`` (empty frozenset only) - - unbound method objects + - unbound method objects (for Python 2 only) This change requires some changes to ``id`` as well. ``id`` fulfills the following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the From pypy.commits at gmail.com Thu Jul 27 11:31:45 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 27 Jul 2017 08:31:45 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Manual merge of the test from e71aec0042dd Message-ID: <597a0761.12b3df0a.24c1a.fd7d@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91982:2cfb43090965 Date: 2017-07-27 17:31 +0200 http://bitbucket.org/pypy/pypy/changeset/2cfb43090965/ Log: Manual merge of the test from e71aec0042dd diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -1,5 +1,5 @@ # encoding: utf-8 -import pytest +import pytest, sys from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -416,6 +416,11 @@ raises(ValueError, FunctionType.__setstate__, f, (1, 2, 3)) class AppTestMethod: + def setup_class(cls): + cls.w_runappdirect_on_cpython = cls.space.wrap( + cls.runappdirect and + '__pypy__' not in sys.builtin_module_names) + def test_simple_call(self): class A(object): def func(self, arg2): @@ -586,7 +591,6 @@ assert meth == meth assert meth == MethodType(func, object) - @pytest.mark.skipif("config.option.runappdirect") def test_method_identity(self): class A(object): def m(self): @@ -603,19 +607,24 @@ a = A() a2 = A() - assert a.m is a.m - assert id(a.m) == id(a.m) - assert a.m is not a.n - assert id(a.m) 
!= id(a.n) - assert a.m is not a2.m - assert id(a.m) != id(a2.m) + x = a.m; y = a.m + assert x is not y + assert id(x) != id(y) + assert x == y + assert x is not a.n + assert id(x) != id(a.n) + assert x is not a2.m + assert id(x) != id(a2.m) - assert A.m is A.m - assert id(A.m) == id(A.m) - assert A.m is not A.n - assert id(A.m) != id(A.n) - assert A.m is B.m - assert id(A.m) == id(B.m) + if not self.runappdirect_on_cpython: + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m == A.m + x = A.m + assert x is not A.n + assert id(x) != id(A.n) + assert x is B.m + assert id(x) == id(B.m) class TestMethod: From pypy.commits at gmail.com Thu Jul 27 11:31:42 2017 From: pypy.commits at gmail.com (arigo) Date: Thu, 27 Jul 2017 08:31:42 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: This is how e71aec0042dd looks like in py3.5 (sorry, there are Message-ID: <597a075e.2e9ddf0a.61f95.aa55@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91980:d7812755dc71 Date: 2017-07-27 17:25 +0200 http://bitbucket.org/pypy/pypy/changeset/d7812755dc71/ Log: This is how e71aec0042dd looks like in py3.5 (sorry, there are likely conflicts and this is not solving them) diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -516,8 +516,9 @@ def __init__(self, space, w_function, w_instance): self.space = space + assert w_instance is not None # unbound methods only exist in Python 2 self.w_function = w_function - self.w_instance = w_instance # or None + self.w_instance = w_instance def descr_method__new__(space, w_subtype, w_function, w_instance): if space.is_w(w_instance, space.w_None): @@ -577,24 +578,6 @@ return space.w_False return space.newbool(space.eq_w(self.w_function, w_other.w_function)) - def is_w(self, space, other): - if not isinstance(other, Method): - return False - return (self.w_instance is other.w_instance and - self.w_function is other.w_function) - - def 
immutable_unique_id(self, space): - from pypy.objspace.std.util import IDTAG_METHOD as tag - from pypy.objspace.std.util import IDTAG_SHIFT - if self.w_instance is not None: - id = space.bigint_w(space.id(self.w_instance)) - id = id.lshift(LONG_BIT) - else: - id = rbigint.fromint(0) - id = id.or_(space.bigint_w(space.id(self.w_function))) - id = id.lshift(IDTAG_SHIFT).int_or_(tag) - return space.newlong_from_rbigint(id) - def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) @@ -606,7 +589,7 @@ from pypy.interpreter.gateway import BuiltinCode w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) - w_instance = self.w_instance or space.w_None + w_instance = self.w_instance w_function = self.w_function if (isinstance(w_function, Function) and isinstance(w_function.code, BuiltinCode)): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -9,7 +9,6 @@ IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 -IDTAG_METHOD = 9 IDTAG_SPECIAL = 11 # -1 - (-maxunicode-1): unichar # 0 - 255: char # 256: empty string From pypy.commits at gmail.com Thu Jul 27 12:54:18 2017 From: pypy.commits at gmail.com (rlamy) Date: Thu, 27 Jul 2017 09:54:18 -0700 (PDT) Subject: [pypy-commit] pypy install-rpython: tmp Message-ID: <597a1aba.10841c0a.b3c56.7a67@mx.google.com> Author: Ronan Lamy Branch: install-rpython Changeset: r91983:cde84dc03420 Date: 2017-07-27 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/cde84dc03420/ Log: tmp diff --git a/MANIFEST.in b/MANIFEST.in --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,10 +1,7 @@ include README-rpython.rst exclude README.rst recursive-include rpython/translator/c/src *.c *.h -prune _pytest -prune ctypes_configure +recursive-include rpython/rlib/src *.c *.h +recursive-include rpython/rlib/rvmprof/src *.c *.h prune include prune lib-python -prune lib_pypy -prune py -prune pypy diff --git a/setup.py 
b/setup.py --- a/setup.py +++ b/setup.py @@ -18,9 +18,13 @@ RPython. Packaging issues are likely, feedback is welcome. """ +PKG_EXCLUDES = ( + 'lib_pypy', 'lib_pypy.*', 'pypy', 'pypy.*', + 'py', 'py.*', '_pytest', '_pytest.*') + setup( name='rpython', - version='0.1.4', + version='0.2.0', description='RPython', long_description=long_description, @@ -40,9 +44,15 @@ ], keywords='development', - packages=find_packages(), # MANIFEST.in filters out all the pypy stuff - package_data={'rpython': ['translator/c/src/*.c', 'translator/c/src/*.h']}, - install_requires=['pytest'], + packages=find_packages(exclude=PKG_EXCLUDES), + package_data={ + 'rpython.translator.c': ['src/*.c', 'src/*.h'], + 'rpython.rlib': ['src/*.c', 'src/*.h'], + 'rpython.rlib.rvmprof': ['src/**/*.c', 'src/**/*.h'], + 'rpython.rlib.rjitlog': ['src/*.c', 'src/*.h'], + 'rpython.jit.backend.llsupport': ['src/*.c', 'src/*.h'], + }, + install_requires=['pytest<3'], entry_points={ "console_scripts" : [ "rpython = rpython.__main__:main", From pypy.commits at gmail.com Thu Jul 27 12:54:20 2017 From: pypy.commits at gmail.com (rlamy) Date: Thu, 27 Jul 2017 09:54:20 -0700 (PDT) Subject: [pypy-commit] pypy install-rpython: Update version, package_data, install_requires, ... Message-ID: <597a1abc.1babdf0a.4684e.c432@mx.google.com> Author: Ronan Lamy Branch: install-rpython Changeset: r91984:3c3eeb58a4fd Date: 2017-07-27 17:53 +0100 http://bitbucket.org/pypy/pypy/changeset/3c3eeb58a4fd/ Log: Update version, package_data, install_requires, ... 
diff --git a/MANIFEST.in b/MANIFEST.in --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,7 +1,2 @@ include README-rpython.rst exclude README.rst -recursive-include rpython/translator/c/src *.c *.h -recursive-include rpython/rlib/src *.c *.h -recursive-include rpython/rlib/rvmprof/src *.c *.h -prune include -prune lib-python diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -46,15 +46,15 @@ packages=find_packages(exclude=PKG_EXCLUDES), package_data={ - 'rpython.translator.c': ['src/*.c', 'src/*.h'], - 'rpython.rlib': ['src/*.c', 'src/*.h'], - 'rpython.rlib.rvmprof': ['src/**/*.c', 'src/**/*.h'], - 'rpython.rlib.rjitlog': ['src/*.c', 'src/*.h'], - 'rpython.jit.backend.llsupport': ['src/*.c', 'src/*.h'], + 'rpython': ['**/*.c', '**/*.h'], + 'rpython.rlib.rvmprof': ['src/shared/**/*.*'], }, + # https://github.com/pypa/setuptools/issues/1064 + include_package_data=True, + install_requires=['pytest<3'], entry_points={ - "console_scripts" : [ + "console_scripts": [ "rpython = rpython.__main__:main", ], }, From pypy.commits at gmail.com Thu Jul 27 13:13:57 2017 From: pypy.commits at gmail.com (rlamy) Date: Thu, 27 Jul 2017 10:13:57 -0700 (PDT) Subject: [pypy-commit] pypy install-rpython: Improve README Message-ID: <597a1f55.cfa3df0a.90ae8.b439@mx.google.com> Author: Ronan Lamy Branch: install-rpython Changeset: r91985:4ec9624649ac Date: 2017-07-27 18:13 +0100 http://bitbucket.org/pypy/pypy/changeset/4ec9624649ac/ Log: Improve README diff --git a/README-rpython.rst b/README-rpython.rst --- a/README-rpython.rst +++ b/README-rpython.rst @@ -5,13 +5,12 @@ dynamic languages, emphasizing a clean separation between language specification and implementation aspects. -By separating concerns in this way, our implementation of Python - and other -dynamic languages - is able to automatically generate a Just-in-Time compiler -for any dynamic language. 
It also allows a mix-and-match approach to -implementation decisions, including many that have historically been outside of -a user's control, such as target platform, memory and threading models, garbage -collection strategies, and optimizations applied, including whether or not to -have a JIT in the first place. +By separating concerns in this way, it can automatically generate a +Just-in-Time compiler for any dynamic language. It also allows a mix-and-match +approach to implementation decisions, including many that have historically +been outside of a user's control, such as target platform, memory and threading +models, garbage collection strategies, and optimizations applied, including +whether or not to have a JIT in the first place. Links ----- From pypy.commits at gmail.com Thu Jul 27 17:39:53 2017 From: pypy.commits at gmail.com (mattip) Date: Thu, 27 Jul 2017 14:39:53 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-refactor-tp_dealloc: try to simplify handling of tp_dealloc. start by simplify, disable leakchecking Message-ID: <597a5da9.aa82df0a.d431e.ea29@mx.google.com> Author: Matti Picus Branch: cpyext-refactor-tp_dealloc Changeset: r91986:0e68f216e013 Date: 2017-07-28 00:38 +0300 http://bitbucket.org/pypy/pypy/changeset/0e68f216e013/ Log: try to simplify handling of tp_dealloc. 
start by simplify, disable leakchecking diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -5,7 +5,7 @@ Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( - PyObject, PyObjectP, from_ref, Py_IncRef, Py_DecRef, + PyObject, PyObjectP, from_ref, incref, decref, get_typedescr) from pypy.module.cpyext.typeobject import PyTypeObjectPtr from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall @@ -66,7 +66,7 @@ obj_voidp = rffi.cast(rffi.VOIDP, obj) generic_cpy_call(space, pto.c_tp_free, obj_voidp) if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: - Py_DecRef(space, rffi.cast(PyObject, pto)) + decref(space, rffi.cast(PyObject, pto)) @cpython_api([PyTypeObjectPtr], PyObject, result_is_ll=True) def _PyObject_GC_New(space, type): @@ -319,7 +319,7 @@ @cpython_api([PyObject], PyObject, result_is_ll=True) def PyObject_SelfIter(space, ref): """Undocumented function, this is what CPython does.""" - Py_IncRef(space, ref) + incref(space, ref) return ref @cpython_api([PyObject, PyObject], PyObject) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -102,13 +102,14 @@ 'itertools', 'time', 'binascii', 'micronumpy', 'mmap' ]) + check_leaks = False def cleanup(self): self.space.getexecutioncontext().cleanup_cpyext_state() rawrefcount._collect() self.space.user_del_action._run_finalizers() try: - leakfinder.stop_tracking_allocations(check=True) + leakfinder.stop_tracking_allocations(check=self.check_leaks) except leakfinder.MallocMismatch as e: result = e.args[0] filtered_result = {} diff --git a/pypy/module/cpyext/test/test_userslots.py b/pypy/module/cpyext/test/test_userslots.py --- a/pypy/module/cpyext/test/test_userslots.py +++ 
b/pypy/module/cpyext/test/test_userslots.py @@ -16,11 +16,14 @@ """) w_datetype = space.type(w_date) py_date = make_ref(space, w_date) - py_datetype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_datetype)) + py_datetype = make_ref(space, w_datetype) + py_datetype = rffi.cast(PyTypeObjectPtr, py_datetype) assert py_datetype.c_tp_as_number assert py_datetype.c_tp_as_number.c_nb_add w_obj = generic_cpy_call(space, py_datetype.c_tp_as_number.c_nb_add, py_date, py_date) + api.Py_DecRef(py_datetype) + api.Py_DecRef(py_date) assert space.str_w(w_obj) == 'sum!' def test_tp_new_from_python(self, space, api): @@ -46,6 +49,7 @@ arg, space.newdict({})) w_year = space.getattr(w_obj, space.newtext('year')) assert space.int_w(w_year) == 1 + api.Py_DecRef(py_datetype) def test_descr_slots(self, space, api): w_descr = space.appexec([], """(): @@ -78,6 +82,11 @@ w_res = generic_cpy_call(space, py_descrtype.c_tp_descr_get, py_descr, None, space.w_int) assert space.int_w(w_res) == 43 + api.Py_DecRef(py_descr) + api.Py_DecRef(py_descrtype) + # the w_descrtype leaks, it is immortal? 
+ self.check_leaks = False + class AppTestUserSlots(AppTestCpythonExtensionBase): def test_tp_hash_from_python(self): @@ -137,19 +146,23 @@ ("get__timestamp", "METH_NOARGS", ''' PyObject * one = PyLong_FromLong(1); + Py_INCREF(one); + Py_INCREF(one); PyObject * a = PyTuple_Pack(3, one, one, one); PyObject * k = NULL; obj = _Timestamp.tp_new(&_Timestamp, a, k); - Py_DECREF(one); + Py_DECREF(a); return obj; '''), ("get_timestamp", "METH_NOARGS", ''' PyObject * one = PyLong_FromLong(1); + Py_INCREF(one); + Py_INCREF(one); PyObject * a = PyTuple_Pack(3, one, one, one); PyObject * k = NULL; obj = Timestamp.tp_new(&Timestamp, a, k); - Py_DECREF(one); + Py_DECREF(a); return obj; '''), ], prologue=''' diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -505,7 +505,7 @@ W_TypeObject.__init__(self, space, name, bases_w or [space.w_object], dict_w, force_new_layout=new_layout, - is_heaptype=flag_heaptype) + is_heaptype=flag_heaptype, is_cpytype=True) self.flag_cpytype = True # if a sequence or a mapping, then set the flag to force it if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item: @@ -528,28 +528,7 @@ @slot_function([PyObject], lltype.Void) def subtype_dealloc(space, obj): - pto = obj.c_ob_type - base = pto - this_func_ptr = llslot(space, subtype_dealloc) - w_obj = from_ref(space, rffi.cast(PyObject, base)) - # This wrapper is created on a specific type, call it w_A. - # We wish to call the dealloc function from one of the base classes of w_A, - # the first of which is not this function itself. - # w_obj is an instance of w_A or one of its subclasses. So climb up the - # inheritance chain until base.c_tp_dealloc is exactly this_func, and then - # continue on up until they differ. 
- #print 'subtype_dealloc, start from', rffi.charp2str(base.c_tp_name) - while base.c_tp_dealloc != this_func_ptr: - base = base.c_tp_base - assert base - #print ' ne move to', rffi.charp2str(base.c_tp_name) - w_obj = from_ref(space, rffi.cast(PyObject, base)) - while base.c_tp_dealloc == this_func_ptr: - base = base.c_tp_base - assert base - #print ' eq move to', rffi.charp2str(base.c_tp_name) - w_obj = from_ref(space, rffi.cast(PyObject, base)) - #print ' end with', rffi.charp2str(base.c_tp_name) + base = rffi.cast(PyTypeObjectPtr, as_pyobj(space, space.w_object)) dealloc = base.c_tp_dealloc # XXX call tp_del if necessary generic_cpy_call(space, dealloc, obj) @@ -757,11 +736,8 @@ # only for the exact type, like 'space.w_tuple' or 'space.w_list' pto.c_tp_dealloc = typedescr.get_dealloc().get_llhelper(space) else: - # for all subtypes, use base's dealloc (requires sorting in attach_all) - pto.c_tp_dealloc = pto.c_tp_base.c_tp_dealloc - if not pto.c_tp_dealloc: - # strange, but happens (ABCMeta) - pto.c_tp_dealloc = llslot(space, subtype_dealloc) + # always assign to subtype_dealloc for w_obj -> pyobj instantiation + pto.c_tp_dealloc = subtype_dealloc.api_func.get_llhelper(space) if builder.cpyext_type_init is not None: builder.cpyext_type_init.append((pto, w_type)) @@ -770,13 +746,28 @@ finish_type_2(space, pto, w_type) pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct) + + update_all_slots(space, w_type, pto) + if pto.c_tp_base: if pto.c_tp_base.c_tp_basicsize > pto.c_tp_basicsize: pto.c_tp_basicsize = pto.c_tp_base.c_tp_basicsize if pto.c_tp_itemsize < pto.c_tp_base.c_tp_itemsize: pto.c_tp_itemsize = pto.c_tp_base.c_tp_itemsize - update_all_slots(space, w_type, pto) + # XXX refactor - parts of this are done in finish_type_2 -> inherit_slots + if not pto.c_tp_as_number: + pto.c_tp_as_number = pto.c_tp_base.c_tp_as_number + pto.c_tp_flags |= pto.c_tp_base.c_tp_flags & Py_TPFLAGS_CHECKTYPES + pto.c_tp_flags |= pto.c_tp_base.c_tp_flags & 
Py_TPFLAGS_HAVE_INPLACEOPS + if not pto.c_tp_as_sequence: + pto.c_tp_as_sequence = pto.c_tp_base.c_tp_as_sequence + pto.c_tp_flags |= pto.c_tp_base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS + if not pto.c_tp_as_mapping: + pto.c_tp_as_mapping = pto.c_tp_base.c_tp_as_mapping + #if not pto.c_tp_as_buffer: pto.c_tp_as_buffer = base.c_tp_as_buffer + + if not pto.c_tp_new: base_object_pyo = make_ref(space, space.w_object) base_object_pto = rffi.cast(PyTypeObjectPtr, base_object_pyo) @@ -798,6 +789,9 @@ return 0 def type_realize(space, py_obj): + """ + Creates an interpreter type from a PyTypeObject structure. + """ pto = rffi.cast(PyTypeObjectPtr, py_obj) assert pto.c_tp_flags & Py_TPFLAGS_READY == 0 assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0 @@ -889,20 +883,6 @@ w_obj.ready() finish_type_2(space, py_type, w_obj) - base = py_type.c_tp_base - if base: - # XXX refactor - parts of this are done in finish_type_2 -> inherit_slots - if not py_type.c_tp_as_number: - py_type.c_tp_as_number = base.c_tp_as_number - py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_CHECKTYPES - py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS - if not py_type.c_tp_as_sequence: - py_type.c_tp_as_sequence = base.c_tp_as_sequence - py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS - if not py_type.c_tp_as_mapping: - py_type.c_tp_as_mapping = base.c_tp_as_mapping - #if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer - return w_obj def finish_type_1(space, pto, bases_w=None): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -170,7 +170,7 @@ @dont_look_inside def __init__(self, space, name, bases_w, dict_w, overridetypedef=None, force_new_layout=False, - is_heaptype=True): + is_heaptype=True, is_cpytype=False): self.space = space self.name = name self.bases_w = bases_w @@ -181,7 +181,7 @@ self.w_doc = space.w_None 
self.weak_subclasses = [] self.flag_heaptype = is_heaptype - self.flag_cpytype = False + self.flag_cpytype = is_cpytype self.flag_abstract = False self.flag_sequence_bug_compat = False self.flag_map_or_seq = '?' # '?' means "don't know, check otherwise" @@ -1148,7 +1148,7 @@ for w_base in w_self.bases_w: if not isinstance(w_base, W_TypeObject): continue - w_self.flag_cpytype |= w_base.flag_cpytype + #w_self.flag_cpytype |= w_base.flag_cpytype w_self.flag_abstract |= w_base.flag_abstract if w_self.flag_map_or_seq == '?': w_self.flag_map_or_seq = w_base.flag_map_or_seq From pypy.commits at gmail.com Fri Jul 28 06:25:25 2017 From: pypy.commits at gmail.com (mattip) Date: Fri, 28 Jul 2017 03:25:25 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-refactor-tp_dealloc: toggle check_leaks, c_tp_dict is collected but makeGetSet is creating immortal objects Message-ID: <597b1115.01571c0a.18a20.7ba1@mx.google.com> Author: Matti Picus Branch: cpyext-refactor-tp_dealloc Changeset: r91987:23ec069ceb74 Date: 2017-07-28 13:24 +0300 http://bitbucket.org/pypy/pypy/changeset/23ec069ceb74/ Log: toggle check_leaks, c_tp_dict is collected but makeGetSet is creating immortal objects leakcheck fails test in test_structseq.py diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -102,7 +102,7 @@ 'itertools', 'time', 'binascii', 'micronumpy', 'mmap' ]) - check_leaks = False + check_leaks = True def cleanup(self): self.space.getexecutioncontext().cleanup_cpyext_state() diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py +++ b/pypy/module/cpyext/test/test_dictobject.py @@ -255,6 +255,7 @@ ]) d = module.get_type_dict(1) assert d['real'].__get__(1, 1) == 1 + def test_advanced(self): module = self.import_extension('foo', [ ("dict_len", "METH_O", diff --git 
a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -154,6 +154,7 @@ make_typedescr(W_GetSetPropertyEx.typedef, basestruct=PyGetSetDescrObject.TO, attach=getsetdescr_attach, + dealloc=getsetdescr_dealloc, ) make_typedescr(W_PyCClassMethodObject.typedef, basestruct=PyMethodDescrObject.TO, @@ -199,6 +200,13 @@ assert isinstance(w_obj, W_GetSetPropertyEx) py_getsetdescr.c_d_getset = w_obj.getset + at slot_function([PyObject], lltype.Void) +def getsetdescr_dealloc(space, obj): + from pypy.module.cpyext.object import _dealloc + py_getsetdescr = rffi.cast(PyGetSetDescrObject, obj) + xxx + _dealloc(space, obj) + def methoddescr_attach(space, py_obj, w_obj, w_userdata=None): py_methoddescr = rffi.cast(PyMethodDescrObject, py_obj) # XXX assign to d_dname, d_type? @@ -655,7 +663,7 @@ Py_DecRef(space, obj_pto.c_tp_bases) #Py_DecRef(space, obj_pto.c_tp_mro) Py_DecRef(space, obj_pto.c_tp_cache) # let's do it like cpython - #Py_DecRef(space, obj_pto.c_tp_dict) + Py_DecRef(space, obj_pto.c_tp_dict) if obj_pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: heaptype = rffi.cast(PyHeapTypeObject, obj) Py_DecRef(space, heaptype.c_ht_name) @@ -930,10 +938,10 @@ if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) - #w_dict = w_obj.getdict(space) + w_dict = w_obj.getdict(space) # pass in the w_obj to convert any values that are # unbound GetSetProperty into bound PyGetSetDescrObject - #pto.c_tp_dict = make_ref(space, w_dict, w_obj) + pto.c_tp_dict = make_ref(space, w_dict, w_obj) @cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL) def PyType_IsSubtype(space, a, b): From pypy.commits at gmail.com Fri Jul 28 10:34:08 2017 From: pypy.commits at gmail.com (rlamy) Date: Fri, 28 Jul 2017 07:34:08 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <597b4b60.2cacdf0a.1388e.1760@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: 
r91988:b4c040585955 Date: 2017-07-28 15:33 +0100 http://bitbucket.org/pypy/pypy/changeset/b4c040585955/ Log: hg merge default diff too long, truncating to 2000 out of 19088 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -27,16 +27,17 @@ ^pypy/module/cpyext/test/.+\.manifest$ ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ ^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ -^pypy/module/cppyy/src/.+\.o$ -^pypy/module/cppyy/bench/.+\.so$ -^pypy/module/cppyy/bench/.+\.root$ -^pypy/module/cppyy/bench/.+\.d$ -^pypy/module/cppyy/src/.+\.errors$ -^pypy/module/cppyy/test/.+_rflx\.cpp$ -^pypy/module/cppyy/test/.+\.so$ -^pypy/module/cppyy/test/.+\.rootmap$ -^pypy/module/cppyy/test/.+\.exe$ -^pypy/module/cppyy/test/.+_cint.h$ +^pypy/module/_cppyy/src/.+\.o$ +^pypy/module/_cppyy/bench/.+\.so$ +^pypy/module/_cppyy/bench/.+\.root$ +^pypy/module/_cppyy/bench/.+\.d$ +^pypy/module/_cppyy/src/.+\.errors$ +^pypy/module/_cppyy/test/.+_rflx\.cpp$ +^pypy/module/_cppyy/test/.+\.so$ +^pypy/module/_cppyy/test/.+\.rootmap$ +^pypy/module/_cppyy/test/.+\.exe$ +^pypy/module/_cppyy/test/.+_cint.h$ +^pypy/module/_cppyy/.+/*\.pcm$ ^pypy/module/test_lib_pypy/cffi_tests/__pycache__.+$ ^pypy/doc/.+\.html$ ^pypy/doc/config/.+\.rst$ @@ -93,6 +94,3 @@ ^release/ ^rpython/_cache$ -pypy/module/cppyy/.+/*\.pcm - - diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py --- a/lib_pypy/_tkinter/tklib_build.py +++ b/lib_pypy/_tkinter/tklib_build.py @@ -22,12 +22,27 @@ linklibs = ['tcl', 'tk'] libdirs = [] else: - for _ver in ['', '8.6', '8.5', '']: + # On some Linux distributions, the tcl and tk libraries are + # stored in /usr/include, so we must check this case also + libdirs = [] + found = False + for _ver in ['', '8.6', '8.5']: incdirs = ['/usr/include/tcl' + _ver] linklibs = ['tcl' + _ver, 'tk' + _ver] - libdirs = [] if os.path.isdir(incdirs[0]): + found = True break + if not found: + for _ver in ['8.6', '8.5', '']: + incdirs = [] + linklibs = 
['tcl' + _ver, 'tk' + _ver] + if os.path.isfile(''.join(['/usr/lib/lib', linklibs[1], '.so'])): + found = True + break + if not found: + sys.stderr.write("*** TCL libraries not found! Falling back...\n") + incdirs = [] + linklibs = ['tcl', 'tk'] config_ffi = FFI() config_ffi.cdef(""" diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -95,6 +95,7 @@ #define _cffi_from_c_ulong PyLong_FromUnsignedLong #define _cffi_from_c_longlong PyLong_FromLongLong #define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -1,7 +1,12 @@ /***** Support code for embedding *****/ -#if defined(_MSC_VER) +#ifdef __cplusplus +extern "C" { +#endif + + +#if defined(_WIN32) # define CFFI_DLLEXPORT __declspec(dllexport) #elif defined(__GNUC__) # define CFFI_DLLEXPORT __attribute__((visibility("default"))) @@ -525,3 +530,7 @@ #undef cffi_compare_and_swap #undef cffi_write_barrier #undef cffi_read_barrier + +#ifdef __cplusplus +} +#endif diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -412,6 +412,9 @@ prnt(' }') prnt(' p[0] = (const void *)0x%x;' % self._version) prnt(' p[1] = &_cffi_type_context;') + prnt('#if PY_MAJOR_VERSION >= 3') + prnt(' return NULL;') + prnt('#endif') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in # 'export_symbols', so instead of fighting it, just give up and @@ -578,7 +581,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.BasePrimitiveType): - if tp.is_integer_type(): + if tp.is_integer_type() and tp.name != '_Bool': return '_cffi_from_c_int(%s, %s)' % (var, tp.name) 
elif isinstance(tp, model.UnknownFloatType): return '_cffi_from_c_double(%s)' % (var,) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -296,7 +296,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): - if tp.is_integer_type(): + if tp.is_integer_type() and tp.name != '_Bool': return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) @@ -872,6 +872,7 @@ #define _cffi_from_c_ulong PyLong_FromUnsignedLong #define _cffi_from_c_longlong PyLong_FromLongLong #define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,7 +39,7 @@ "thread", "itertools", "pyexpat", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "_continuation", "_cffi_backend", - "_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy" + "_csv", "_pypyjson", "_posixsubprocess", # "_cppyy", "micronumpy" "_jitlog", ]) @@ -71,8 +71,8 @@ if name in translation_modules: translation_modules.remove(name) - if "cppyy" in working_modules: - working_modules.remove("cppyy") # not tested on win32 + if "_cppyy" in working_modules: + working_modules.remove("_cppyy") # not tested on win32 # The _locale module is needed by site.py on Windows default_modules.add("_locale") @@ -81,8 +81,8 @@ working_modules.remove('fcntl') # LOCK_NB not defined working_modules.remove("_minimal_curses") working_modules.remove("termios") - if "cppyy" in working_modules: - working_modules.remove("cppyy") # depends on ctypes + if "_cppyy" in working_modules: + working_modules.remove("_cppyy") # depends 
on ctypes #if sys.platform.startswith("linux"): # _mach = os.popen('uname -m', 'r').read().strip() @@ -94,7 +94,7 @@ '_multiprocessing': [('objspace.usemodules.time', True), ('objspace.usemodules.thread', True)], 'cpyext': [('objspace.usemodules.array', True)], - 'cppyy': [('objspace.usemodules.cpyext', True)], + '_cppyy': [('objspace.usemodules.cpyext', True)], 'faulthandler': [('objspace.usemodules._vmprof', True)], } module_suggests = { diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst deleted file mode 100644 --- a/pypy/doc/cppyy.rst +++ /dev/null @@ -1,672 +0,0 @@ -cppyy: C++ bindings for PyPy -============================ - -The cppyy module delivers dynamic Python-C++ bindings. -It is designed for automation, high performance, scale, interactivity, and -handling all of modern C++ (11, 14, etc.). -It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ -reflection and interactivity. -Reflection information is extracted from C++ header files. -Cppyy itself is built into PyPy (an alternative exists for CPython), but -it requires a `backend`_, installable through pip, to interface with Cling. - -.. _Cling: https://root.cern.ch/cling -.. _LLVM: http://llvm.org/ -.. _clang: http://clang.llvm.org/ -.. _backend: https://pypi.python.org/pypi/PyPy-cppyy-backend - - -Installation ------------- - -This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy -module, which is no longer supported. -Both the tooling and user-facing Python codes are very backwards compatible, -however. -Further dependencies are cmake (for general build), Python2.7 (for LLVM), and -a modern C++ compiler (one that supports at least C++11). 
- -Assuming you have a recent enough version of PyPy installed, use pip to -complete the installation of cppyy:: - - $ MAKE_NPROCS=4 pypy-c -m pip install --verbose PyPy-cppyy-backend - -Set the number of parallel builds ('4' in this example, through the MAKE_NPROCS -environment variable) to a number appropriate for your machine. -The building process may take quite some time as it includes a customized -version of LLVM as part of Cling, which is why --verbose is recommended so that -you can see the build progress. - -The default installation will be under -$PYTHONHOME/site-packages/cppyy_backend/lib, -which needs to be added to your dynamic loader path (LD_LIBRARY_PATH). -If you need the dictionary and class map generation tools (used in the examples -below), you need to add $PYTHONHOME/site-packages/cppyy_backend/bin to your -executable path (PATH). - - -Basic bindings example ----------------------- - -These examples assume that cppyy_backend is pointed to by the environment -variable CPPYYHOME, and that CPPYYHOME/lib is added to LD_LIBRARY_PATH and -CPPYYHOME/bin to PATH. - -Let's first test with a trivial example whether all packages are properly -installed and functional. 
-Create a C++ header file with some class in it (all functions are made inline -for convenience; if you have out-of-line code, link with it as appropriate):: - - $ cat MyClass.h - class MyClass { - public: - MyClass(int i = -99) : m_myint(i) {} - - int GetMyInt() { return m_myint; } - void SetMyInt(int i) { m_myint = i; } - - public: - int m_myint; - }; - -Then, generate the bindings using ``genreflex`` (installed under -cppyy_backend/bin in site_packages), and compile the code:: - - $ genreflex MyClass.h - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling - -Next, make sure that the library can be found through the dynamic lookup path -(the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), -for example by adding ".". -Now you're ready to use the bindings. -Since the bindings are designed to look pythonistic, it should be -straightforward:: - - $ pypy-c - >>>> import cppyy - >>>> cppyy.load_reflection_info("libMyClassDict.so") - - >>>> myinst = cppyy.gbl.MyClass(42) - >>>> print myinst.GetMyInt() - 42 - >>>> myinst.SetMyInt(33) - >>>> print myinst.m_myint - 33 - >>>> myinst.m_myint = 77 - >>>> print myinst.GetMyInt() - 77 - >>>> help(cppyy.gbl.MyClass) # shows that normal python introspection works - -That's all there is to it! - - -Automatic class loader ----------------------- - -There is one big problem in the code above, that prevents its use in a (large -scale) production setting: the explicit loading of the reflection library. -Clearly, if explicit load statements such as these show up in code downstream -from the ``MyClass`` package, then that prevents the ``MyClass`` author from -repackaging or even simply renaming the dictionary library. - -The solution is to make use of an automatic class loader, so that downstream -code never has to call ``load_reflection_info()`` directly. 
-The class loader makes use of so-called rootmap files, which ``genreflex`` -can produce. -These files contain the list of available C++ classes and specify the library -that needs to be loaded for their use (as an aside, this listing allows for a -cross-check to see whether reflection info is generated for all classes that -you expect). -By convention, the rootmap files should be located next to the reflection info -libraries, so that they can be found through the normal shared library search -path. -They can be concatenated together, or consist of a single rootmap file per -library. -For example:: - - $ genreflex MyClass.h --rootmap=libMyClassDict.rootmap --rootmap-lib=libMyClassDict.so - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling - -where the first option (``--rootmap``) specifies the output file name, and the -second option (``--rootmap-lib``) the name of the reflection library where -``MyClass`` will live. -It is necessary to provide that name explicitly, since it is only in the -separate linking step where this name is fixed. -If the second option is not given, the library is assumed to be libMyClass.so, -a name that is derived from the name of the header file. - -With the rootmap file in place, the above example can be rerun without explicit -loading of the reflection info library:: - - $ pypy-c - >>>> import cppyy - >>>> myinst = cppyy.gbl.MyClass(42) - >>>> print myinst.GetMyInt() - 42 - >>>> # etc. ... - -As a caveat, note that the class loader is currently limited to classes only. 
- - -Advanced example ----------------- - -The following snippet of C++ is very contrived, to allow showing that such -pathological code can be handled and to show how certain features play out in -practice:: - - $ cat MyAdvanced.h - #include - - class Base1 { - public: - Base1(int i) : m_i(i) {} - virtual ~Base1() {} - int m_i; - }; - - class Base2 { - public: - Base2(double d) : m_d(d) {} - virtual ~Base2() {} - double m_d; - }; - - class C; - - class Derived : public virtual Base1, public virtual Base2 { - public: - Derived(const std::string& name, int i, double d) : Base1(i), Base2(d), m_name(name) {} - virtual C* gimeC() { return (C*)0; } - std::string m_name; - }; - - Base2* BaseFactory(const std::string& name, int i, double d) { - return new Derived(name, i, d); - } - -This code is still only in a header file, with all functions inline, for -convenience of the example. -If the implementations live in a separate source file or shared library, the -only change needed is to link those in when building the reflection library. - -If you were to run ``genreflex`` like above in the basic example, you will -find that not all classes of interest will be reflected, nor will be the -global factory function. -In particular, ``std::string`` will be missing, since it is not defined in -this header file, but in a header file that is included. -In practical terms, general classes such as ``std::string`` should live in a -core reflection set, but for the moment assume we want to have it in the -reflection library that we are building for this example. - -The ``genreflex`` script can be steered using a so-called `selection file`_ -(see "Generating Reflex Dictionaries") -which is a simple XML file specifying, either explicitly or by using a -pattern, which classes, variables, namespaces, etc. to select from the given -header file. 
-With the aid of a selection file, a large project can be easily managed: -simply ``#include`` all relevant headers into a single header file that is -handed to ``genreflex``. -In fact, if you hand multiple header files to ``genreflex``, then a selection -file is almost obligatory: without it, only classes from the last header will -be selected. -Then, apply a selection file to pick up all the relevant classes. -For our purposes, the following rather straightforward selection will do -(the name ``lcgdict`` for the root is historical, but required):: - - $ cat MyAdvanced.xml - - - - - - - -.. _selection file: https://root.cern.ch/how/how-use-reflex - -Now the reflection info can be generated and compiled:: - - $ genreflex MyAdvanced.h --selection=MyAdvanced.xml - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$CPPYYHOME/lib -lCling - -and subsequently be used from PyPy:: - - >>>> import cppyy - >>>> cppyy.load_reflection_info("libAdvExDict.so") - - >>>> d = cppyy.gbl.BaseFactory("name", 42, 3.14) - >>>> type(d) - - >>>> isinstance(d, cppyy.gbl.Base1) - True - >>>> isinstance(d, cppyy.gbl.Base2) - True - >>>> d.m_i, d.m_d - (42, 3.14) - >>>> d.m_name == "name" - True - >>>> - -Again, that's all there is to it! - -A couple of things to note, though. -If you look back at the C++ definition of the ``BaseFactory`` function, -you will see that it declares the return type to be a ``Base2``, yet the -bindings return an object of the actual type ``Derived``? -This choice is made for a couple of reasons. -First, it makes method dispatching easier: if bound objects are always their -most derived type, then it is easy to calculate any offsets, if necessary. -Second, it makes memory management easier: the combination of the type and -the memory address uniquely identifies an object. 
-That way, it can be recycled and object identity can be maintained if it is -entered as a function argument into C++ and comes back to PyPy as a return -value. -Last, but not least, casting is decidedly unpythonistic. -By always providing the most derived type known, casting becomes unnecessary. -For example, the data member of ``Base2`` is simply directly available. -Note also that the unreflected ``gimeC`` method of ``Derived`` does not -preclude its use. -It is only the ``gimeC`` method that is unusable as long as class ``C`` is -unknown to the system. - - -Features --------- - -The following is not meant to be an exhaustive list, since cppyy is still -under active development. -Furthermore, the intention is that every feature is as natural as possible on -the python side, so if you find something missing in the list below, simply -try it out. -It is not always possible to provide exact mapping between python and C++ -(active memory management is one such case), but by and large, if the use of a -feature does not strike you as obvious, it is more likely to simply be a bug. -That is a strong statement to make, but also a worthy goal. -For the C++ side of the examples, refer to this :doc:`example code `, which was -bound using:: - - $ genreflex example.h --deep --rootmap=libexampleDict.rootmap --rootmap-lib=libexampleDict.so - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include example_rflx.cpp -o libexampleDict.so -L$CPPYYHOME/lib -lCling - -* **abstract classes**: Are represented as python classes, since they are - needed to complete the inheritance hierarchies, but will raise an exception - if an attempt is made to instantiate from them. 
- Example:: - - >>>> from cppyy.gbl import AbstractClass, ConcreteClass - >>>> a = AbstractClass() - Traceback (most recent call last): - File "", line 1, in - TypeError: cannot instantiate abstract class 'AbstractClass' - >>>> issubclass(ConcreteClass, AbstractClass) - True - >>>> c = ConcreteClass() - >>>> isinstance(c, AbstractClass) - True - >>>> - -* **arrays**: Supported for builtin data types only, as used from module - ``array``. - Out-of-bounds checking is limited to those cases where the size is known at - compile time (and hence part of the reflection info). - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> from array import array - >>>> c = ConcreteClass() - >>>> c.array_method(array('d', [1., 2., 3., 4.]), 4) - 1 2 3 4 - >>>> - -* **builtin data types**: Map onto the expected equivalent python types, with - the caveat that there may be size differences, and thus it is possible that - exceptions are raised if an overflow is detected. - -* **casting**: Is supposed to be unnecessary. - Object pointer returns from functions provide the most derived class known - in the hierarchy of the object being returned. - This is important to preserve object identity as well as to make casting, - a pure C++ feature after all, superfluous. - Example:: - - >>>> from cppyy.gbl import AbstractClass, ConcreteClass - >>>> c = ConcreteClass() - >>>> ConcreteClass.show_autocast.__doc__ - 'AbstractClass* ConcreteClass::show_autocast()' - >>>> d = c.show_autocast() - >>>> type(d) - - >>>> - - However, if need be, you can perform C++-style reinterpret_casts (i.e. - without taking offsets into account), by taking and rebinding the address - of an object:: - - >>>> from cppyy import addressof, bind_object - >>>> e = bind_object(addressof(d), AbstractClass) - >>>> type(e) - - >>>> - -* **classes and structs**: Get mapped onto python classes, where they can be - instantiated as expected. 
- If classes are inner classes or live in a namespace, their naming and - location will reflect that. - Example:: - - >>>> from cppyy.gbl import ConcreteClass, Namespace - >>>> ConcreteClass == Namespace.ConcreteClass - False - >>>> n = Namespace.ConcreteClass.NestedClass() - >>>> type(n) - - >>>> - -* **data members**: Public data members are represented as python properties - and provide read and write access on instances as expected. - Private and protected data members are not accessible. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> c = ConcreteClass() - >>>> c.m_int - 42 - >>>> - -* **default arguments**: C++ default arguments work as expected, but python - keywords are not supported. - It is technically possible to support keywords, but for the C++ interface, - the formal argument names have no meaning and are not considered part of the - API, hence it is not a good idea to use keywords. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> c = ConcreteClass() # uses default argument - >>>> c.m_int - 42 - >>>> c = ConcreteClass(13) - >>>> c.m_int - 13 - >>>> - -* **doc strings**: The doc string of a method or function contains the C++ - arguments and return types of all overloads of that name, as applicable. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> print ConcreteClass.array_method.__doc__ - void ConcreteClass::array_method(int*, int) - void ConcreteClass::array_method(double*, int) - >>>> - -* **enums**: Are translated as ints with no further checking. - -* **functions**: Work as expected and live in their appropriate namespace - (which can be the global one, ``cppyy.gbl``). - -* **inheritance**: All combinations of inheritance on the C++ (single, - multiple, virtual) are supported in the binding. - However, new python classes can only use single inheritance from a bound C++ - class. - Multiple inheritance would introduce two "this" pointers in the binding. - This is a current, not a fundamental, limitation. 
- The C++ side will not see any overridden methods on the python side, as - cross-inheritance is planned but not yet supported. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> help(ConcreteClass) - Help on class ConcreteClass in module __main__: - - class ConcreteClass(AbstractClass) - | Method resolution order: - | ConcreteClass - | AbstractClass - | cppyy.CPPObject - | __builtin__.CPPInstance - | __builtin__.object - | - | Methods defined here: - | - | ConcreteClass(self, *args) - | ConcreteClass::ConcreteClass(const ConcreteClass&) - | ConcreteClass::ConcreteClass(int) - | ConcreteClass::ConcreteClass() - | - etc. .... - -* **memory**: C++ instances created by calling their constructor from python - are owned by python. - You can check/change the ownership with the _python_owns flag that every - bound instance carries. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> c = ConcreteClass() - >>>> c._python_owns # True: object created in Python - True - >>>> - -* **methods**: Are represented as python methods and work as expected. - They are first class objects and can be bound to an instance. - Virtual C++ methods work as expected. - To select a specific virtual method, do like with normal python classes - that override methods: select it from the class that you need, rather than - calling the method on the instance. - To select a specific overload, use the __dispatch__ special function, which - takes the name of the desired method and its signature (which can be - obtained from the doc string) as arguments. - -* **namespaces**: Are represented as python classes. - Namespaces are more open-ended than classes, so sometimes initial access may - result in updates as data and functions are looked up and constructed - lazily. - Thus the result of ``dir()`` on a namespace shows the classes available, - even if they may not have been created yet. - It does not show classes that could potentially be loaded by the class - loader. 
- Once created, namespaces are registered as modules, to allow importing from - them. - Namespace currently do not work with the class loader. - Fixing these bootstrap problems is on the TODO list. - The global namespace is ``cppyy.gbl``. - -* **NULL**: Is represented as ``cppyy.gbl.nullptr``. - In C++11, the keyword ``nullptr`` is used to represent ``NULL``. - For clarity of intent, it is recommended to use this instead of ``None`` - (or the integer ``0``, which can serve in some cases), as ``None`` is better - understood as ``void`` in C++. - -* **operator conversions**: If defined in the C++ class and a python - equivalent exists (i.e. all builtin integer and floating point types, as well - as ``bool``), it will map onto that python conversion. - Note that ``char*`` is mapped onto ``__str__``. - Example:: - - >>>> from cppyy.gbl import ConcreteClass - >>>> print ConcreteClass() - Hello operator const char*! - >>>> - -* **operator overloads**: If defined in the C++ class and if a python - equivalent is available (not always the case, think e.g. of ``operator||``), - then they work as expected. - Special care needs to be taken for global operator overloads in C++: first, - make sure that they are actually reflected, especially for the global - overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterate over a vector). - Second, make sure that reflection info is loaded in the proper order. - I.e. that these global overloads are available before use. - -* **pointers**: For builtin data types, see arrays. - For objects, a pointer to an object and an object looks the same, unless - the pointer is a data member. - In that case, assigning to the data member will cause a copy of the pointer - and care should be taken about the object's life time. - If a pointer is a global variable, the C++ side can replace the underlying - object and the python side will immediately reflect that. 
- -* **PyObject***: Arguments and return types of ``PyObject*`` can be used, and - passed on to CPython API calls. - Since these CPython-like objects need to be created and tracked (this all - happens through ``cpyext``) this interface is not particularly fast. - -* **static data members**: Are represented as python property objects on the - class and the meta-class. - Both read and write access is as expected. - -* **static methods**: Are represented as python's ``staticmethod`` objects - and can be called both from the class as well as from instances. - -* **strings**: The std::string class is considered a builtin C++ type and - mixes quite well with python's str. - Python's str can be passed where a ``const char*`` is expected, and an str - will be returned if the return type is ``const char*``. - -* **templated classes**: Are represented in a meta-class style in python. - This may look a little bit confusing, but conceptually is rather natural. - For example, given the class ``std::vector``, the meta-class part would - be ``std.vector``. - Then, to get the instantiation on ``int``, do ``std.vector(int)`` and to - create an instance of that class, do ``std.vector(int)()``:: - - >>>> import cppyy - >>>> cppyy.load_reflection_info('libexampleDict.so') - >>>> cppyy.gbl.std.vector # template metatype - - >>>> cppyy.gbl.std.vector(int) # instantiates template -> class - '> - >>>> cppyy.gbl.std.vector(int)() # instantiates class -> object - <__main__.std::vector object at 0x00007fe480ba4bc0> - >>>> - - Note that templates can be build up by handing actual types to the class - instantiation (as done in this vector example), or by passing in the list of - template arguments as a string. - The former is a lot easier to work with if you have template instantiations - using classes that themselves are templates in the arguments (think e.g a - vector of vectors). 
- All template classes must already exist in the loaded reflection info, they - do not work (yet) with the class loader. - - For compatibility with other bindings generators, use of square brackets - instead of parenthesis to instantiate templates is supported as well. - -* **templated functions**: Automatically participate in overloading and are - used in the same way as other global functions. - -* **templated methods**: For now, require an explicit selection of the - template parameters. - This will be changed to allow them to participate in overloads as expected. - -* **typedefs**: Are simple python references to the actual classes to which - they refer. - -* **unary operators**: Are supported if a python equivalent exists, and if the - operator is defined in the C++ class. - -You can always find more detailed examples and see the full of supported -features by looking at the tests in pypy/module/cppyy/test. - -If a feature or reflection info is missing, this is supposed to be handled -gracefully. -In fact, there are unit tests explicitly for this purpose (even as their use -becomes less interesting over time, as the number of missing features -decreases). -Only when a missing feature is used, should there be an exception. -For example, if no reflection info is available for a return type, then a -class that has a method with that return type can still be used. -Only that one specific method can not be used. - - -Templates ---------- - -Templates can be automatically instantiated, assuming the appropriate header -files have been loaded or are accessible to the class loader. -This is the case for example for all of STL. 
-For example:: - - $ cat MyTemplate.h - #include - - class MyClass { - public: - MyClass(int i = -99) : m_i(i) {} - MyClass(const MyClass& s) : m_i(s.m_i) {} - MyClass& operator=(const MyClass& s) { m_i = s.m_i; return *this; } - ~MyClass() {} - int m_i; - }; - -Run the normal ``genreflex`` and compilation steps:: - - $ genreflex MyTemplate.h --selection=MyTemplate.xml - $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$CPPYYHOME/lib -lCling - -Subsequent use should be as expected. -Note the meta-class style of "instantiating" the template:: - - >>>> import cppyy - >>>> cppyy.load_reflection_info("libTemplateDict.so") - >>>> std = cppyy.gbl.std - >>>> MyClass = cppyy.gbl.MyClass - >>>> v = std.vector(MyClass)() - >>>> v += [MyClass(1), MyClass(2), MyClass(3)] - >>>> for m in v: - .... print m.m_i, - .... - 1 2 3 - >>>> - -The arguments to the template instantiation can either be a string with the -full list of arguments, or the explicit classes. -The latter makes for easier code writing if the classes passed to the -instantiation are themselves templates. - - -The fast lane -------------- - -By default, cppyy will use direct function pointers through `CFFI`_ whenever -possible. If this causes problems for you, you can disable it by setting the -CPPYY_DISABLE_FASTPATH environment variable. - -.. _CFFI: https://cffi.readthedocs.io/en/latest/ - - -CPython -------- - -Most of the ideas in cppyy come originally from the `PyROOT`_ project, which -contains a CPython-based cppyy.py module (with similar dependencies as the -one that comes with PyPy). -A standalone pip-installable version is planned, but for now you can install -ROOT through your favorite distribution installer (available in the science -section). - -.. _PyROOT: https://root.cern.ch/pyroot - -There are a couple of minor differences between the two versions of cppyy -(the CPython version has a few more features). 
-Work is on-going to integrate the nightly tests of both to make sure their -feature sets are equalized. - - -Python3 -------- - -The CPython version of cppyy supports Python3, assuming your packager has -build the backend for it. -The cppyy module has not been tested with the `Py3k`_ version of PyPy. -Note that the generated reflection information (from ``genreflex``) is fully -independent of Python, and does not need to be rebuild when switching versions -or interpreters. - -.. _Py3k: https://bitbucket.org/pypy/pypy/src/py3k - - -.. toctree:: - :hidden: - - cppyy_example diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -337,6 +337,8 @@ - ``frozenset`` (empty frozenset only) + - unbound method objects (for Python 2 only) + This change requires some changes to ``id`` as well. ``id`` fulfills the following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the above types will return a value that is computed from the argument, and can diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -61,29 +61,23 @@ .. _libffi: http://sourceware.org/libffi/ -Cling and cppyy ---------------- +cppyy +----- -The builtin :doc:`cppyy ` module uses reflection information, provided by -`Cling`_ (which needs to be `installed separately`_), of C/C++ code to -automatically generate bindings at runtime. -In Python, classes and functions are always runtime structures, so when they -are generated matters not for performance. -However, if the backend itself is capable of dynamic behavior, it is a much -better functional match, allowing tighter integration and more natural -language mappings. +For C++, `cppyy`_ is an automated bindings generator available for both +PyPy and CPython. 
+``cppyy`` relies on declarations from C++ header files to dynamically +construct Python equivalent classes, functions, variables, etc. +It is designed for use by large scale programs and supports modern C++. +With PyPy, it leverages the built-in ``_cppyy`` module, allowing the JIT to +remove most of the cross-language overhead. -The :doc:`cppyy ` module is written in RPython, thus PyPy's JIT is able to remove -most cross-language call overhead. +To install, run ``pip install cppyy``. +Further details are available in the `full documentation`_. -:doc:Full details are `available here `. +.. _cppyy: http://cppyy.readthedocs.org/ +.. _`full documentation`: http://cppyy.readthedocs.org/ -.. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend -.. _Cling: https://root.cern.ch/cling - -.. toctree:: - - cppyy RPython Mixed Modules --------------------- diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -40,6 +40,9 @@ sure things are ported back to the trunk and to the branch as necessary. +* Maybe bump the SOABI number in module/imp/importing. This has many + implications, so make sure the PyPy community agrees to the change. + * Update and write documentation * update pypy/doc/contributor.rst (and possibly LICENSE) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -25,3 +25,8 @@ .. branch: cpyext-hash_notimpl If ``tp_hash`` is ``PyObject_HashNotImplemented``, set ``obj.__dict__['__hash__']`` to None + +.. branch: cppyy-packaging + +Renaming of ``cppyy`` to ``_cppyy``. +The former is now an external package installable with ``pip install cppyy``. 
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3838,6 +3838,7 @@ assert result == samples for i in range(len(samples)): assert result[i] == p[i] and type(result[i]) is type(p[i]) + assert (type(result[i]) is bool) == (type(samples[i]) is bool) # BInt = new_primitive_type("int") py.test.raises(TypeError, unpack, p) diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/_cppyy/__init__.py rename from pypy/module/cppyy/__init__.py rename to pypy/module/_cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/_cppyy/__init__.py @@ -33,11 +33,11 @@ # pythonization functions may be written in RPython, but the interp2app # code generation is not, so give it a chance to run now - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi capi.register_pythonizations(space) def startup(self, space): - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi capi.verify_backend(space) # may raise ImportError space.call_method(self, '_init_pythonify') diff --git a/pypy/module/cppyy/backend/create_cppyy_package.py b/pypy/module/_cppyy/backend/create_cppyy_package.py rename from pypy/module/cppyy/backend/create_cppyy_package.py rename to pypy/module/_cppyy/backend/create_cppyy_package.py diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/_cppyy/bench/Makefile rename from pypy/module/cppyy/bench/Makefile rename to pypy/module/_cppyy/bench/Makefile diff --git a/pypy/module/cppyy/bench/bench02.cxx b/pypy/module/_cppyy/bench/bench02.cxx rename from pypy/module/cppyy/bench/bench02.cxx rename to pypy/module/_cppyy/bench/bench02.cxx diff --git a/pypy/module/cppyy/bench/bench02.h b/pypy/module/_cppyy/bench/bench02.h rename from pypy/module/cppyy/bench/bench02.h rename to pypy/module/_cppyy/bench/bench02.h diff --git 
a/pypy/module/cppyy/bench/bench02.xml b/pypy/module/_cppyy/bench/bench02.xml rename from pypy/module/cppyy/bench/bench02.xml rename to pypy/module/_cppyy/bench/bench02.xml diff --git a/pypy/module/cppyy/bench/hsimple.C b/pypy/module/_cppyy/bench/hsimple.C rename from pypy/module/cppyy/bench/hsimple.C rename to pypy/module/_cppyy/bench/hsimple.C diff --git a/pypy/module/cppyy/bench/hsimple.py b/pypy/module/_cppyy/bench/hsimple.py rename from pypy/module/cppyy/bench/hsimple.py rename to pypy/module/_cppyy/bench/hsimple.py diff --git a/pypy/module/cppyy/bench/hsimple_rflx.py b/pypy/module/_cppyy/bench/hsimple_rflx.py rename from pypy/module/cppyy/bench/hsimple_rflx.py rename to pypy/module/_cppyy/bench/hsimple_rflx.py diff --git a/pypy/module/cppyy/capi/__init__.py b/pypy/module/_cppyy/capi/__init__.py rename from pypy/module/cppyy/capi/__init__.py rename to pypy/module/_cppyy/capi/__init__.py --- a/pypy/module/cppyy/capi/__init__.py +++ b/pypy/module/_cppyy/capi/__init__.py @@ -9,10 +9,10 @@ # the selection of the desired backend (default is Reflex). 
# choose C-API access method: -from pypy.module.cppyy.capi.loadable_capi import * -#from pypy.module.cppyy.capi.builtin_capi import * +from pypy.module._cppyy.capi.loadable_capi import * +#from pypy.module._cppyy.capi.builtin_capi import * -from pypy.module.cppyy.capi.capi_types import C_OBJECT,\ +from pypy.module._cppyy.capi.capi_types import C_OBJECT,\ C_NULL_TYPE, C_NULL_OBJECT def direct_ptradd(ptr, offset): diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/_cppyy/capi/builtin_capi.py rename from pypy/module/cppyy/capi/builtin_capi.py rename to pypy/module/_cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/_cppyy/capi/builtin_capi.py @@ -4,7 +4,7 @@ import cling_capi as backend -from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ +from pypy.module._cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR identify = backend.identify diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/_cppyy/capi/capi_types.py rename from pypy/module/cppyy/capi/capi_types.py rename to pypy/module/_cppyy/capi/capi_types.py diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/_cppyy/capi/cling_capi.py rename from pypy/module/cppyy/capi/cling_capi.py rename to pypy/module/_cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/_cppyy/capi/cling_capi.py @@ -11,7 +11,7 @@ from rpython.rlib import jit, libffi, rdynload from pypy.module._rawffi.array import W_ArrayInstance -from pypy.module.cppyy.capi.capi_types import C_OBJECT +from pypy.module._cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -99,7 +99,7 @@ def stdstring_c_str(space, w_self): """Return a python string taking into account \0""" - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy cppstr = space.interp_w(interp_cppyy.W_CPPInstance, 
w_self, can_be_None=False) return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) @@ -112,12 +112,12 @@ W_AbstractSeqIterObject.__init__(self, w_vector) # TODO: this should live in rpythonize.py or something so that the # imports can move to the top w/o getting circles - from pypy.module.cppyy import interp_cppyy + from pypy.module._cppyy import interp_cppyy assert isinstance(w_vector, interp_cppyy.W_CPPInstance) vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) self.overload = vector.cppclass.get_overload("__getitem__") - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) @@ -131,7 +131,7 @@ self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) - from pypy.module.cppyy import converter + from pypy.module._cppyy import converter self.converter = converter.get_converter(space, v_type, '') self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) self.stride = v_size @@ -143,7 +143,7 @@ self.w_seq = None raise OperationError(space.w_StopIteration, space.w_None) try: - from pypy.module.cppyy import capi # TODO: refector + from pypy.module._cppyy import capi # TODO: refector offset = capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) except OperationError as e: @@ -186,7 +186,7 @@ _method_alias(space, w_pycppclass, "__str__", "c_str") if "vector" in name[:11]: # len('std::vector') == 11 - from pypy.module.cppyy import capi + from pypy.module._cppyy import capi v_type = capi.c_stdvector_valuetype(space, name) if v_type: space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) diff --git a/pypy/module/_cppyy/capi/loadable_capi.py b/pypy/module/_cppyy/capi/loadable_capi.py new file mode 100644 --- /dev/null +++ 
b/pypy/module/_cppyy/capi/loadable_capi.py @@ -0,0 +1,629 @@ +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask +from rpython.rlib import jit, jit_libffi, libffi, rdynload, objectmodel +from rpython.rlib.rarithmetic import r_singlefloat +from rpython.tool import leakfinder + +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.error import oefmt + +from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc +from pypy.module._cffi_backend import newtype +from pypy.module._cppyy import ffitypes + +from pypy.module._cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR + + +reflection_library = 'libcppyy_backend.so' + +def identify(): + return 'loadable_capi' + +# this is not technically correct, but will do for now +std_string_name = 'std::basic_string' + +class _Arg: # poor man's union + _immutable_ = True + def __init__(self, tc, h = 0, l = -1, s = '', p = rffi.cast(rffi.VOIDP, 0)): + self.tc = tc + self._handle = h + self._long = l + self._string = s + self._voidp = p + +class _ArgH(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'h', h = val) + +class _ArgL(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'l', l = val) + +class _ArgS(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 's', s = val) + +class _ArgP(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'p', p = val) + +# For the loadable CAPI, the calls start and end in RPython. Therefore, the standard +# _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some +# vars (e.g. void* equivalent) can not be wrapped, and others (such as rfloat) risk +# rounding problems. This W_RCTypeFun then, takes args, instead of args_w. Note that +# rcall() is a new method, so as to not interfere with the base class call and _call +# when rtyping. 
It is also called directly (see call_capi below). +class W_RCTypeFunc(ctypefunc.W_CTypeFunc): + @jit.unroll_safe + def rcall(self, funcaddr, args): + assert self.cif_descr + self = jit.promote(self) + # no checking of len(args) needed, as calls in this context are not dynamic + + # The following code is functionally similar to W_CTypeFunc._call, but its + # implementation is tailored to the restricted use (include memory handling) + # of the CAPI calls. + space = self.space + cif_descr = self.cif_descr + size = cif_descr.exchange_size + raw_string = rffi.cast(rffi.CCHARP, 0) # only ever have one in the CAPI + buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw') + try: + for i in range(len(args)): + data = rffi.ptradd(buffer, cif_descr.exchange_args[i]) + obj = args[i] + argtype = self.fargs[i] + # the following is clumsy, but the data types used as arguments are + # very limited, so it'll do for now + if obj.tc == 'l': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned) + misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) + elif obj.tc == 'h': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned) + misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) + elif obj.tc == 'p': + assert obj._voidp != rffi.cast(rffi.VOIDP, 0) + data = rffi.cast(rffi.VOIDPP, data) + data[0] = obj._voidp + else: # only other use is sring + assert obj.tc == 's' + n = len(obj._string) + assert raw_string == rffi.cast(rffi.CCHARP, 0) + # XXX could use rffi.get_nonmovingbuffer_final_null() + raw_string = rffi.str2charp(obj._string) + data = rffi.cast(rffi.CCHARPP, data) + data[0] = raw_string + + jit_libffi.jit_ffi_call(cif_descr, + rffi.cast(rffi.VOIDP, funcaddr), + buffer) + + resultdata = rffi.ptradd(buffer, cif_descr.exchange_result) + # this wrapping is unnecessary, but the assumption is that given the + # immediate unwrapping, the round-trip is removed + w_res = 
self.ctitem.copy_and_convert_to_object(resultdata) + finally: + if raw_string != rffi.cast(rffi.CCHARP, 0): + rffi.free_charp(raw_string) + lltype.free(buffer, flavor='raw') + return w_res + +class State(object): + def __init__(self, space): + self.library = None + self.capi_calls = {} + + nt = newtype # module from _cffi_backend + state = space.fromcache(ffitypes.State) # factored out common types + + # TODO: the following need to match up with the globally defined C_XYZ low-level + # types (see capi/__init__.py), but by using strings here, that isn't guaranteed + c_opaque_ptr = state.c_ulong + + c_scope = c_opaque_ptr + c_type = c_scope + c_object = c_opaque_ptr + c_method = c_opaque_ptr + c_index = state.c_long + c_index_array = state.c_voidp + + c_void = state.c_void + c_char = state.c_char + c_uchar = state.c_uchar + c_short = state.c_short + c_int = state.c_int + c_long = state.c_long + c_llong = state.c_llong + c_ullong = state.c_ullong + c_float = state.c_float + c_double = state.c_double + c_ldouble = state.c_ldouble + + c_ccharp = state.c_ccharp + c_voidp = state.c_voidp + + c_size_t = nt.new_primitive_type(space, 'size_t') + c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') + + self.capi_call_ifaces = { + # name to opaque C++ scope representation + 'num_scopes' : ([c_scope], c_int), + 'scope_name' : ([c_scope, c_int], c_ccharp), + + 'resolve_name' : ([c_ccharp], c_ccharp), + 'get_scope' : ([c_ccharp], c_scope), + 'actual_class' : ([c_type, c_object], c_type), + + # memory management + 'allocate' : ([c_type], c_object), + 'deallocate' : ([c_type, c_object], c_void), + 'destruct' : ([c_type, c_object], c_void), + + # method/function dispatching + 'call_v' : ([c_method, c_object, c_int, c_voidp], c_void), + 'call_b' : ([c_method, c_object, c_int, c_voidp], c_uchar), + 'call_c' : ([c_method, c_object, c_int, c_voidp], c_char), + + 'call_h' : ([c_method, c_object, c_int, c_voidp], c_short), + 'call_i' : ([c_method, c_object, c_int, c_voidp], c_int), + 
'call_l' : ([c_method, c_object, c_int, c_voidp], c_long), + 'call_ll' : ([c_method, c_object, c_int, c_voidp], c_llong), + 'call_f' : ([c_method, c_object, c_int, c_voidp], c_float), + 'call_d' : ([c_method, c_object, c_int, c_voidp], c_double), + 'call_ld' : ([c_method, c_object, c_int, c_voidp], c_ldouble), + + 'call_r' : ([c_method, c_object, c_int, c_voidp], c_voidp), + # call_s actually takes an size_t* as last parameter, but this will do + 'call_s' : ([c_method, c_object, c_int, c_voidp, c_voidp], c_ccharp), + + 'constructor' : ([c_method, c_object, c_int, c_voidp], c_object), + 'call_o' : ([c_method, c_object, c_int, c_voidp, c_type], c_object), + + 'get_function_address' : ([c_scope, c_index], c_voidp), # TODO: verify + + # handling of function argument buffer + 'allocate_function_args' : ([c_int], c_voidp), + 'deallocate_function_args' : ([c_voidp], c_void), + 'function_arg_sizeof' : ([], c_size_t), + 'function_arg_typeoffset' : ([], c_size_t), + + # scope reflection information + 'is_namespace' : ([c_scope], c_int), + 'is_template' : ([c_ccharp], c_int), + 'is_abstract' : ([c_type], c_int), + 'is_enum' : ([c_ccharp], c_int), + + # type/class reflection information + 'final_name' : ([c_type], c_ccharp), + 'scoped_final_name' : ([c_type], c_ccharp), + 'has_complex_hierarchy' : ([c_type], c_int), + 'num_bases' : ([c_type], c_int), + 'base_name' : ([c_type, c_int], c_ccharp), + 'is_subtype' : ([c_type, c_type], c_int), + + 'base_offset' : ([c_type, c_type, c_object, c_int], c_ptrdiff_t), + + # method/function reflection information + 'num_methods' : ([c_scope], c_int), + 'method_index_at' : ([c_scope, c_int], c_index), + 'method_indices_from_name' : ([c_scope, c_ccharp], c_index_array), + + 'method_name' : ([c_scope, c_index], c_ccharp), + 'method_result_type' : ([c_scope, c_index], c_ccharp), + 'method_num_args' : ([c_scope, c_index], c_int), + 'method_req_args' : ([c_scope, c_index], c_int), + 'method_arg_type' : ([c_scope, c_index, c_int], c_ccharp), + 
'method_arg_default' : ([c_scope, c_index, c_int], c_ccharp), + 'method_signature' : ([c_scope, c_index], c_ccharp), + + 'method_is_template' : ([c_scope, c_index], c_int), + 'method_num_template_args' : ([c_scope, c_index], c_int), + 'method_template_arg_name' : ([c_scope, c_index, c_index], c_ccharp), + + 'get_method' : ([c_scope, c_index], c_method), + 'get_global_operator' : ([c_scope, c_scope, c_scope, c_ccharp], c_index), + + # method properties + 'is_constructor' : ([c_type, c_index], c_int), + 'is_staticmethod' : ([c_type, c_index], c_int), + + # data member reflection information + 'num_datamembers' : ([c_scope], c_int), + 'datamember_name' : ([c_scope, c_int], c_ccharp), + 'datamember_type' : ([c_scope, c_int], c_ccharp), + 'datamember_offset' : ([c_scope, c_int], c_ptrdiff_t), + + 'datamember_index' : ([c_scope, c_ccharp], c_int), + + # data member properties + 'is_publicdata' : ([c_scope, c_int], c_int), + 'is_staticdata' : ([c_scope, c_int], c_int), + + # misc helpers + 'strtoll' : ([c_ccharp], c_llong), + 'strtoull' : ([c_ccharp], c_ullong), + 'free' : ([c_voidp], c_void), + + 'charp2stdstring' : ([c_ccharp, c_size_t], c_object), + #stdstring2charp actually takes an size_t* as last parameter, but this will do + 'stdstring2charp' : ([c_object, c_voidp], c_ccharp), + 'stdstring2stdstring' : ([c_object], c_object), + + 'stdvector_valuetype' : ([c_ccharp], c_ccharp), + 'stdvector_valuesize' : ([c_ccharp], c_size_t), + + } + + # size/offset are backend-specific but fixed after load + self.c_sizeof_farg = 0 + self.c_offset_farg = 0 + + +def load_reflection_library(space): + state = space.fromcache(State) + if state.library is None: + from pypy.module._cffi_backend.libraryobj import W_Library + state.library = W_Library(space, reflection_library, rdynload.RTLD_LOCAL | rdynload.RTLD_LAZY) + if state.library: + # fix constants + state.c_sizeof_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) + state.c_offset_farg = 
_cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) + return state.library + +def verify_backend(space): + try: + load_reflection_library(space) + except Exception: + if objectmodel.we_are_translated(): + raise oefmt(space.w_ImportError, + "missing reflection library %s", reflection_library) + return False + return True + +def call_capi(space, name, args): + state = space.fromcache(State) + try: + c_call = state.capi_calls[name] + except KeyError: + if state.library is None: + load_reflection_library(space) + iface = state.capi_call_ifaces[name] + cfunc = W_RCTypeFunc(space, iface[0], iface[1], False) + c_call = state.library.load_function(cfunc, 'cppyy_'+name) + # TODO: there must be a better way to trick the leakfinder ... + if not objectmodel.we_are_translated(): + leakfinder.remember_free(c_call.ctype.cif_descr._obj0) + state.capi_calls[name] = c_call + with c_call as ptr: + return c_call.ctype.rcall(ptr, args) + +def _cdata_to_cobject(space, w_cdata): + return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) + +def _cdata_to_size_t(space, w_cdata): + return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) + +def _cdata_to_ptrdiff_t(space, w_cdata): + return rffi.cast(rffi.LONG, space.int_w(w_cdata)) + +def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow + w_cdata = space.interp_w(cdataobj.W_CData, w_cdata, can_be_None=False) + ptr = w_cdata.unsafe_escaping_ptr() + return rffi.cast(rffi.VOIDP, ptr) + +def _cdata_to_ccharp(space, w_cdata): + ptr = _cdata_to_ptr(space, w_cdata) # see above ... something better? 
+ return rffi.cast(rffi.CCHARP, ptr) + +def c_load_dictionary(name): + return libffi.CDLL(name) + +# name to opaque C++ scope representation ------------------------------------ +def c_num_scopes(space, cppscope): + return space.int_w(call_capi(space, 'num_scopes', [_ArgH(cppscope.handle)])) +def c_scope_name(space, cppscope, iscope): + args = [_ArgH(cppscope.handle), _ArgL(iscope)] + return charp2str_free(space, call_capi(space, 'scope_name', args)) + +def c_resolve_name(space, name): + return charp2str_free(space, call_capi(space, 'resolve_name', [_ArgS(name)])) +def c_get_scope_opaque(space, name): + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_ArgS(name)]))) +def c_actual_class(space, cppclass, cppobj): + args = [_ArgH(cppclass.handle), _ArgH(cppobj)] + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) + +# memory management ---------------------------------------------------------- +def c_allocate(space, cppclass): + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_ArgH(cppclass.handle)])) +def c_deallocate(space, cppclass, cppobject): + call_capi(space, 'deallocate', [_ArgH(cppclass.handle), _ArgH(cppobject)]) +def c_destruct(space, cppclass, cppobject): + call_capi(space, 'destruct', [_ArgH(cppclass.handle), _ArgH(cppobject)]) + +# method/function dispatching ------------------------------------------------ +def c_call_v(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + call_capi(space, 'call_v', args) +def c_call_b(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) +def c_call_c(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.CHAR, space.bytes_w(call_capi(space, 'call_c', args))[0]) 
+def c_call_h(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) +def c_call_i(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) +def c_call_l(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) +def c_call_ll(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) +def c_call_f(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) +def c_call_d(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) +def c_call_ld(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.LONGDOUBLE, space.float_w(call_capi(space, 'call_ld', args))) + +def c_call_r(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) +def c_call_s(space, cppmethod, cppobject, nargs, cargs): + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + w_cstr = call_capi(space, 'call_s', + [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs), + _ArgP(rffi.cast(rffi.VOIDP, length))]) + cstr_len = 
intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return _cdata_to_ccharp(space, w_cstr), cstr_len + +def c_constructor(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return _cdata_to_cobject(space, call_capi(space, 'constructor', args)) +def c_call_o(space, cppmethod, cppobject, nargs, cargs, cppclass): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs), _ArgH(cppclass.handle)] + return _cdata_to_cobject(space, call_capi(space, 'call_o', args)) + +def c_get_function_address(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return rffi.cast(C_FUNC_PTR, + _cdata_to_ptr(space, call_capi(space, 'get_function_address', args))) + +# handling of function argument buffer --------------------------------------- +def c_allocate_function_args(space, size): + return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_ArgL(size)])) +def c_deallocate_function_args(space, cargs): + call_capi(space, 'deallocate_function_args', [_ArgP(cargs)]) +def c_function_arg_sizeof(space): + state = space.fromcache(State) + return state.c_sizeof_farg +def c_function_arg_typeoffset(space): + state = space.fromcache(State) + return state.c_offset_farg + +# scope reflection information ----------------------------------------------- +def c_is_namespace(space, scope): + return space.bool_w(call_capi(space, 'is_namespace', [_ArgH(scope)])) +def c_is_template(space, name): + return space.bool_w(call_capi(space, 'is_template', [_ArgS(name)])) +def c_is_abstract(space, cpptype): + return space.bool_w(call_capi(space, 'is_abstract', [_ArgH(cpptype)])) +def c_is_enum(space, name): + return space.bool_w(call_capi(space, 'is_enum', [_ArgS(name)])) + +# type/class reflection information ------------------------------------------ +def c_final_name(space, cpptype): + return charp2str_free(space, call_capi(space, 'final_name', [_ArgH(cpptype)])) +def 
c_scoped_final_name(space, cpptype): + return charp2str_free(space, call_capi(space, 'scoped_final_name', [_ArgH(cpptype)])) +def c_has_complex_hierarchy(space, handle): + return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_ArgH(handle)])) +def c_num_bases(space, cppclass): + return space.int_w(call_capi(space, 'num_bases', [_ArgH(cppclass.handle)])) +def c_base_name(space, cppclass, base_index): + args = [_ArgH(cppclass.handle), _ArgL(base_index)] + return charp2str_free(space, call_capi(space, 'base_name', args)) +def c_is_subtype(space, derived, base): + jit.promote(base) + if derived == base: + return bool(1) + return space.bool_w(call_capi(space, 'is_subtype', [_ArgH(derived.handle), _ArgH(base.handle)])) + +def _c_base_offset(space, derived_h, base_h, address, direction): + args = [_ArgH(derived_h), _ArgH(base_h), _ArgH(address), _ArgL(direction)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'base_offset', args)) +def c_base_offset(space, derived, base, address, direction): + if derived == base: + return rffi.cast(rffi.LONG, 0) + return _c_base_offset(space, derived.handle, base.handle, address, direction) +def c_base_offset1(space, derived_h, base, address, direction): + return _c_base_offset(space, derived_h, base.handle, address, direction) + +# method/function reflection information ------------------------------------- +def c_num_methods(space, cppscope): + args = [_ArgH(cppscope.handle)] + return space.int_w(call_capi(space, 'num_methods', args)) +def c_method_index_at(space, cppscope, imethod): + args = [_ArgH(cppscope.handle), _ArgL(imethod)] + return space.int_w(call_capi(space, 'method_index_at', args)) +def c_method_indices_from_name(space, cppscope, name): + args = [_ArgH(cppscope.handle), _ArgS(name)] + indices = rffi.cast(C_INDEX_ARRAY, + _cdata_to_ptr(space, call_capi(space, 'method_indices_from_name', args))) + if not indices: + return [] + py_indices = [] + i = 0 + index = indices[i] + while index != -1: + i += 1 + 
py_indices.append(index) + index = indices[i] + c_free(space, rffi.cast(rffi.VOIDP, indices)) # c_free defined below + return py_indices + +def c_method_name(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return charp2str_free(space, call_capi(space, 'method_name', args)) +def c_method_result_type(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return charp2str_free(space, call_capi(space, 'method_result_type', args)) +def c_method_num_args(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return space.int_w(call_capi(space, 'method_num_args', args)) +def c_method_req_args(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return space.int_w(call_capi(space, 'method_req_args', args)) +def c_method_arg_type(space, cppscope, index, arg_index): + args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)] + return charp2str_free(space, call_capi(space, 'method_arg_type', args)) +def c_method_arg_default(space, cppscope, index, arg_index): + args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)] + return charp2str_free(space, call_capi(space, 'method_arg_default', args)) +def c_method_signature(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return charp2str_free(space, call_capi(space, 'method_signature', args)) + +def c_method_is_template(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return space.bool_w(call_capi(space, 'method_is_template', args)) +def _c_method_num_template_args(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return space.int_w(call_capi(space, 'method_num_template_args', args)) +def c_template_args(space, cppscope, index): + nargs = _c_method_num_template_args(space, cppscope, index) + arg1 = _ArgH(cppscope.handle) + arg2 = _ArgL(index) + args = [c_resolve_name(space, charp2str_free(space, + call_capi(space, 'method_template_arg_name', [arg1, arg2, 
_ArgL(iarg)])) + ) for iarg in range(nargs)] + return args + +def c_get_method(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) +def c_get_global_operator(space, nss, lc, rc, op): + if nss is not None: + args = [_ArgH(nss.handle), _ArgH(lc.handle), _ArgH(rc.handle), _ArgS(op)] + return rffi.cast(WLAVC_INDEX, space.int_w(call_capi(space, 'get_global_operator', args))) + return rffi.cast(WLAVC_INDEX, -1) + +# method properties ---------------------------------------------------------- +def c_is_constructor(space, cppclass, index): + args = [_ArgH(cppclass.handle), _ArgL(index)] + return space.bool_w(call_capi(space, 'is_constructor', args)) +def c_is_staticmethod(space, cppclass, index): + args = [_ArgH(cppclass.handle), _ArgL(index)] + return space.bool_w(call_capi(space, 'is_staticmethod', args)) + +# data member reflection information ----------------------------------------- +def c_num_datamembers(space, cppscope): + return space.int_w(call_capi(space, 'num_datamembers', [_ArgH(cppscope.handle)])) +def c_datamember_name(space, cppscope, datamember_index): + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] + return charp2str_free(space, call_capi(space, 'datamember_name', args)) +def c_datamember_type(space, cppscope, datamember_index): + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] + return charp2str_free(space, call_capi(space, 'datamember_type', args)) +def c_datamember_offset(space, cppscope, datamember_index): + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'datamember_offset', args)) + +def c_datamember_index(space, cppscope, name): + args = [_ArgH(cppscope.handle), _ArgS(name)] + return space.int_w(call_capi(space, 'datamember_index', args)) + +# data member properties ----------------------------------------------------- +def c_is_publicdata(space, cppscope, 
datamember_index): + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] + return space.bool_w(call_capi(space, 'is_publicdata', args)) +def c_is_staticdata(space, cppscope, datamember_index): + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] + return space.bool_w(call_capi(space, 'is_staticdata', args)) + +# misc helpers --------------------------------------------------------------- +def c_strtoll(space, svalue): + return space.r_longlong_w(call_capi(space, 'strtoll', [_ArgS(svalue)])) +def c_strtoull(space, svalue): + return space.r_ulonglong_w(call_capi(space, 'strtoull', [_ArgS(svalue)])) +def c_free(space, voidp): + call_capi(space, 'free', [_ArgP(voidp)]) + +def charp2str_free(space, cdata): + charp = rffi.cast(rffi.CCHARP, _cdata_to_ptr(space, cdata)) + pystr = rffi.charp2str(charp) + c_free(space, rffi.cast(rffi.VOIDP, charp)) + return pystr + +def c_charp2stdstring(space, svalue, sz): + return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', + [_ArgS(svalue), _ArgH(rffi.cast(rffi.ULONG, sz))])) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + w_cstr = call_capi(space, 'stdstring2charp', + [_ArgH(cppstr), _ArgP(rffi.cast(rffi.VOIDP, sz))]) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return rffi.charpsize2str(_cdata_to_ccharp(space, w_cstr), cstr_len) +def c_stdstring2stdstring(space, cppobject): + return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_ArgH(cppobject)])) + +def c_stdvector_valuetype(space, pystr): + return charp2str_free(space, call_capi(space, 'stdvector_valuetype', [_ArgS(pystr)])) + +def c_stdvector_valuetype(space, pystr): + return charp2str_free(space, call_capi(space, 'stdvector_valuetype', [_ArgS(pystr)])) +def c_stdvector_valuesize(space, pystr): + return _cdata_to_size_t(space, call_capi(space, 'stdvector_valuesize', [_ArgS(pystr)])) + + +# TODO: factor these out ... 
+# pythonizations +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module._cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.newtext(c_stdstring2charp(space, cppstr._rawobject)) + +# setup pythonizations for later use at run-time +_pythonizations = {} +def register_pythonizations(space): + "NOT_RPYTHON" + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = interp2app(f).spacebind(space) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.newtext(m1), + space.getattr(w_pycppclass, space.newtext(m2))) + +def pythonize(space, name, w_pycppclass): + if name == "string": + space.setattr(w_pycppclass, space.newtext("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") diff --git a/pypy/module/_cppyy/converter.py b/pypy/module/_cppyy/converter.py new file mode 100644 --- /dev/null +++ b/pypy/module/_cppyy/converter.py @@ -0,0 +1,893 @@ +import sys + +from pypy.interpreter.error import OperationError, oefmt + +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat +from rpython.rlib import rfloat + +from pypy.module._rawffi.interp_rawffi import letter2tp +from pypy.module._rawffi.array import W_Array, W_ArrayInstance + +from pypy.module._cppyy import helper, capi, ffitypes + +# Converter objects are used to translate between RPython and C++. They are +# defined by the type name for which they provide conversion. Uses are for +# function arguments, as well as for read and write access to data members. +# All type conversions are fully checked. +# +# Converter instances are greated by get_converter(), see below. 
+# The name given should be qualified in case there is a specialised, exact +# match for the qualified type. + + +def get_rawobject(space, w_obj): + from pypy.module._cppyy.interp_cppyy import W_CPPInstance + cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) + if cppinstance: + rawobject = cppinstance.get_rawobject() + assert lltype.typeOf(rawobject) == capi.C_OBJECT + return rawobject + return capi.C_NULL_OBJECT + +def set_rawobject(space, w_obj, address): + from pypy.module._cppyy.interp_cppyy import W_CPPInstance + cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) + if cppinstance: + assert lltype.typeOf(cppinstance._rawobject) == capi.C_OBJECT + cppinstance._rawobject = rffi.cast(capi.C_OBJECT, address) + +def get_rawobject_nonnull(space, w_obj): + from pypy.module._cppyy.interp_cppyy import W_CPPInstance + cppinstance = space.interp_w(W_CPPInstance, w_obj, can_be_None=True) + if cppinstance: + cppinstance._nullcheck() + rawobject = cppinstance.get_rawobject() + assert lltype.typeOf(rawobject) == capi.C_OBJECT + return rawobject + return capi.C_NULL_OBJECT + +def is_nullpointer_specialcase(space, w_obj): + # 0, None, and nullptr may serve as "NULL", check for any of them + + # integer 0 + try: + return space.int_w(w_obj) == 0 + except Exception: + pass + # None or nullptr + from pypy.module._cppyy import interp_cppyy + return space.is_true(space.is_(w_obj, space.w_None)) or \ + space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space))) + +def get_rawbuffer(space, w_obj): + # raw buffer + try: + buf = space.getarg_w('s*', w_obj) + return rffi.cast(rffi.VOIDP, buf.get_raw_address()) + except Exception: + pass + # array type + try: + arr = space.interp_w(W_ArrayInstance, w_obj, can_be_None=True) + if arr: + return rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) + except Exception: + pass + # pre-defined NULL + if is_nullpointer_specialcase(space, w_obj): + return rffi.cast(rffi.VOIDP, 0) + raise TypeError("not 
an addressable buffer") + + +class TypeConverter(object): + _immutable_fields_ = ['cffi_name', 'uses_local', 'name'] + + cffi_name = None + uses_local = False + name = "" + + def __init__(self, space, extra): + pass + + def _get_raw_address(self, space, w_obj, offset): + rawobject = get_rawobject_nonnull(space, w_obj) + assert lltype.typeOf(rawobject) == capi.C_OBJECT + if rawobject: + fieldptr = capi.direct_ptradd(rawobject, offset) + else: + fieldptr = rffi.cast(capi.C_OBJECT, offset) + return fieldptr + + def _is_abstract(self, space): + raise oefmt(space.w_TypeError, + "no converter available for '%s'", self.name) + + def cffi_type(self, space): + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + + def convert_argument(self, space, w_obj, address, call_local): + self._is_abstract(space) + + def convert_argument_libffi(self, space, w_obj, address, call_local): + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + + def default_argument_libffi(self, space, address): + from pypy.module._cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + + def from_memory(self, space, w_obj, w_pycppclass, offset): + self._is_abstract(space) + + def to_memory(self, space, w_obj, w_value, offset): + self._is_abstract(space) + + def finalize_call(self, space, w_obj, call_local): + pass + + def free_argument(self, space, arg, call_local): + pass + + +class ArrayCache(object): + def __init__(self, space): + self.space = space + def __getattr__(self, name): + if name.startswith('array_'): + typecode = name[len('array_'):] + arr = self.space.interp_w(W_Array, letter2tp(self.space, typecode)) + setattr(self, name, arr) + return arr + raise AttributeError(name) + + def _freeze_(self): + return True + +class ArrayTypeConverterMixin(object): + _mixin_ = True + _immutable_fields_ = ['size'] + + def __init__(self, space, array_size): + if array_size <= 0: + self.size = sys.maxint + 
else: + self.size = array_size + + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + + def from_memory(self, space, w_obj, w_pycppclass, offset): + # read access, so no copy needed + address_value = self._get_raw_address(space, w_obj, offset) + address = rffi.cast(rffi.ULONG, address_value) + cache = space.fromcache(ArrayCache) + arr = getattr(cache, 'array_' + self.typecode) + return arr.fromaddress(space, address, self.size) + + def to_memory(self, space, w_obj, w_value, offset): + # copy the full array (uses byte copy for now) + address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) + buf = space.getarg_w('s*', w_value) + # TODO: report if too many items given? + for i in range(min(self.size*self.typesize, buf.getlength())): + address[i] = buf.getitem(i) + + +class PtrTypeConverterMixin(object): + _mixin_ = True + _immutable_fields_ = ['size'] + + def __init__(self, space, array_size): + self.size = sys.maxint + + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + + def convert_argument(self, space, w_obj, address, call_local): + w_tc = space.findattr(w_obj, space.newtext('typecode')) + if w_tc is not None and space.text_w(w_tc) != self.typecode: + raise oefmt(space.w_TypeError, + "expected %s pointer type, but received %s", + self.typecode, space.text_w(w_tc)) + x = rffi.cast(rffi.VOIDPP, address) + try: + x[0] = rffi.cast(rffi.VOIDP, get_rawbuffer(space, w_obj)) + except TypeError: + raise oefmt(space.w_TypeError, + "raw buffer interface not supported") + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = 'o' + + def from_memory(self, space, w_obj, w_pycppclass, offset): + # read access, so no copy needed + address_value = self._get_raw_address(space, w_obj, offset) + address = rffi.cast(rffi.ULONGP, address_value) + cache = space.fromcache(ArrayCache) + arr = getattr(cache, 'array_' + self.typecode) + return 
arr.fromaddress(space, address[0], self.size) + + def to_memory(self, space, w_obj, w_value, offset): + # copy only the pointer value + rawobject = get_rawobject_nonnull(space, w_obj) + byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) + buf = space.getarg_w('s*', w_value) + try: + byteptr[0] = buf.get_raw_address() + except ValueError: + raise oefmt(space.w_TypeError, + "raw buffer interface not supported") + + +class NumericTypeConverterMixin(object): + _mixin_ = True + + def convert_argument_libffi(self, space, w_obj, address, call_local): + x = rffi.cast(self.c_ptrtype, address) + x[0] = self._unwrap_object(space, w_obj) + + def default_argument_libffi(self, space, address): + x = rffi.cast(self.c_ptrtype, address) + x[0] = self.default + + def from_memory(self, space, w_obj, w_pycppclass, offset): + address = self._get_raw_address(space, w_obj, offset) + rffiptr = rffi.cast(self.c_ptrtype, address) + return self._wrap_object(space, rffiptr[0]) + + def to_memory(self, space, w_obj, w_value, offset): + address = self._get_raw_address(space, w_obj, offset) + rffiptr = rffi.cast(self.c_ptrtype, address) + rffiptr[0] = self._unwrap_object(space, w_value) + +class ConstRefNumericTypeConverterMixin(NumericTypeConverterMixin): + _mixin_ = True + _immutable_fields_ = ['uses_local'] + + uses_local = True + + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp From pypy.commits at gmail.com Fri Jul 28 11:50:15 2017 From: pypy.commits at gmail.com (rlamy) Date: Fri, 28 Jul 2017 08:50:15 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Make cpyext cleanup more thorough Message-ID: <597b5d37.b6acdf0a.519cb.254b@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91989:0c11b9c793de Date: 2017-07-28 16:49 +0100 http://bitbucket.org/pypy/pypy/changeset/0c11b9c793de/ Log: Make cpyext cleanup more thorough diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- 
a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -83,8 +83,9 @@ def cleanup(self): self.space.getexecutioncontext().cleanup_cpyext_state() - rawrefcount._collect() - self.space.user_del_action._run_finalizers() + for _ in range(5): + rawrefcount._collect() + self.space.user_del_action._run_finalizers() leakfinder.stop_tracking_allocations(check=False) assert not self.space.finalizer_queue.next_dead() From pypy.commits at gmail.com Fri Jul 28 14:22:58 2017 From: pypy.commits at gmail.com (arigo) Date: Fri, 28 Jul 2017 11:22:58 -0700 (PDT) Subject: [pypy-commit] pypy default: Explain a change in whatsnew Message-ID: <597b8102.6593df0a.3c68.e349@mx.google.com> Author: Armin Rigo Branch: Changeset: r91990:e127f014a776 Date: 2017-07-28 20:22 +0200 http://bitbucket.org/pypy/pypy/changeset/e127f014a776/ Log: Explain a change in whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,14 @@ .. this is a revision shortly after release-pypy2.7-v5.8.0 .. startrev: 558bd00b3dd8 +In previous versions of PyPy, ``instance.method`` would return always +the same bound method object, when gotten out of the same instance (as +far as ``is`` and ``id()`` can tell). CPython doesn't do that. Now +PyPy, like CPython, returns a different bound method object every time. +For ``type.method``, PyPy2 still returns always the same *unbound* +method object; CPython does it for built-in types but not for +user-defined types. + .. branch: cffi-complex .. 
branch: cffi-char16-char32 From pypy.commits at gmail.com Sat Jul 29 07:52:18 2017 From: pypy.commits at gmail.com (tobweber) Date: Sat, 29 Jul 2017 04:52:18 -0700 (PDT) Subject: [pypy-commit] stmgc c8-overheads-instrumentation: Add warmup complete event Message-ID: <597c76f2.500a1c0a.5d178.d317@mx.google.com> Author: Tobias Weber Branch: c8-overheads-instrumentation Changeset: r2129:1432eab6963e Date: 2017-07-29 11:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/1432eab6963e/ Log: Add warmup complete event diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -575,6 +575,8 @@ STM_GC_MAJOR_DONE, /* execution duration profiling events */ + STM_WARMUP_COMPLETE, + STM_DURATION_START_TRX, STM_DURATION_WRITE_GC_ONLY, STM_DURATION_WRITE_SLOWPATH, @@ -605,6 +607,7 @@ "gc major start", \ "gc major done", \ /* names of duration events */ \ + "marks completion of benchmark warm up phase" \ "duration of transaction start", \ "duration of gc due to write", \ "duration of write slowpath", \ From pypy.commits at gmail.com Sun Jul 30 10:33:55 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 30 Jul 2017 07:33:55 -0700 (PDT) Subject: [pypy-commit] pypy gc-del-limit-growth: The bounds in the GC need to be fixed, when allocating a lot of objects Message-ID: <597dee53.4eec1c0a.27eb8.b03c@mx.google.com> Author: Armin Rigo Branch: gc-del-limit-growth Changeset: r91991:c58234754604 Date: 2017-07-30 16:31 +0200 http://bitbucket.org/pypy/pypy/changeset/c58234754604/ Log: The bounds in the GC need to be fixed, when allocating a lot of objects with finalizers (issue #2590) From pypy.commits at gmail.com Sun Jul 30 10:33:57 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 30 Jul 2017 07:33:57 -0700 (PDT) Subject: [pypy-commit] pypy gc-del-limit-growth: Add failing test Message-ID: <597dee55.8f871c0a.e843f.06a3@mx.google.com> Author: Armin Rigo Branch: gc-del-limit-growth Changeset: r91992:f9500da6edd2 Date: 2017-07-30 16:33 +0200 
http://bitbucket.org/pypy/pypy/changeset/f9500da6edd2/ Log: Add failing test diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -1099,6 +1099,42 @@ self.interpret(fn, []) + def test_bounded_memory_when_allocating_with_finalizers(self): + # Issue #2590: when allocating a lot of objects with a finalizer + # and little else, the bounds in the (inc)minimark GC are not + # set up reasonably and the total memory usage grows without + # limit. + class B(object): + pass + b = B() + b.num_deleted = 0 + class A(object): + def __init__(self): + fq.register_finalizer(self) + class FQ(rgc.FinalizerQueue): + Class = A + def finalizer_trigger(self): + while True: + a = self.next_dead() + if a is None: + break + b.num_deleted += 1 + fq = FQ() + def f(x, y): + i = 0 + alive_max = 0 + while i < x: + i += 1 + a = A() + a.x = a.y = a.z = i + #print i - b.num_deleted, b.num_deleted + alive = i - b.num_deleted + assert alive >= 0 + alive_max = max(alive_max, alive) + return alive_max + res = self.interpret(f, [1000, 0]) + assert res < 50 + from rpython.rlib.objectmodel import UnboxedValue From pypy.commits at gmail.com Sun Jul 30 10:59:58 2017 From: pypy.commits at gmail.com (arigo) Date: Sun, 30 Jul 2017 07:59:58 -0700 (PDT) Subject: [pypy-commit] pypy gc-del-limit-growth: Fix the test, at least for minimark and incminimark. Message-ID: <597df46e.2a86df0a.cb425.71ce@mx.google.com> Author: Armin Rigo Branch: gc-del-limit-growth Changeset: r91993:264b56beda7a Date: 2017-07-30 16:59 +0200 http://bitbucket.org/pypy/pypy/changeset/264b56beda7a/ Log: Fix the test, at least for minimark and incminimark. 
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2308,6 +2308,7 @@ ll_assert(not (self.probably_young_objects_with_finalizers .non_empty()), "probably_young_objects_with_finalizers should be empty") + self.kept_alive_by_finalizer = r_uint(0) if self.old_objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() elif self.old_objects_with_weakrefs.non_empty(): @@ -2380,6 +2381,9 @@ # we currently have -- but no more than 'max_delta' more than # we currently have. total_memory_used = float(self.get_total_memory_used()) + total_memory_used -= float(self.kept_alive_by_finalizer) + if total_memory_used < 0: + total_memory_used = 0 bounded = self.set_major_threshold_from( min(total_memory_used * self.major_collection_threshold, total_memory_used + self.max_delta), @@ -2418,7 +2422,7 @@ self.execute_finalizers() #END FINALIZING else: - pass #XXX which exception to raise here. Should be unreachable. + ll_assert(False, "bogus gc_state") debug_print("stopping, now in gc state: ", GC_STATES[self.gc_state]) debug_stop("gc-collect-step") @@ -2784,8 +2788,17 @@ def _bump_finalization_state_from_0_to_1(self, obj): ll_assert(self._finalization_state(obj) == 0, "unexpected finalization state != 0") + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + self.get_size(obj) hdr = self.header(obj) hdr.tid |= GCFLAG_FINALIZATION_ORDERING + # A bit hackish, but we will not count these objects as "alive" + # for the purpose of computing when the next major GC should + # occur. This is done for issue #2590: without this, if we + # allocate mostly objects with finalizers, the + # next_major_collection_threshold grows forever and actual + # memory usage is not bounded. 
+ self.kept_alive_by_finalizer += raw_malloc_usage(totalsize) def _recursively_bump_finalization_state_from_2_to_3(self, obj): ll_assert(self._finalization_state(obj) == 2, diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1636,6 +1636,7 @@ # with a finalizer and all objects reachable from there (and also # moves some objects from 'objects_with_finalizers' to # 'run_finalizers'). + self.kept_alive_by_finalizer = r_uint(0) if self.old_objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() # @@ -1678,6 +1679,9 @@ # we currently have -- but no more than 'max_delta' more than # we currently have. total_memory_used = float(self.get_total_memory_used()) + total_memory_used -= float(self.kept_alive_by_finalizer) + if total_memory_used < 0: + total_memory_used = 0 bounded = self.set_major_threshold_from( min(total_memory_used * self.major_collection_threshold, total_memory_used + self.max_delta), @@ -1999,8 +2003,11 @@ def _bump_finalization_state_from_0_to_1(self, obj): ll_assert(self._finalization_state(obj) == 0, "unexpected finalization state != 0") + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + self.get_size(obj) hdr = self.header(obj) hdr.tid |= GCFLAG_FINALIZATION_ORDERING + self.kept_alive_by_finalizer += raw_malloc_usage(totalsize) def _recursively_bump_finalization_state_from_2_to_3(self, obj): ll_assert(self._finalization_state(obj) == 2, diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -1099,42 +1099,6 @@ self.interpret(fn, []) - def test_bounded_memory_when_allocating_with_finalizers(self): - # Issue #2590: when allocating a lot of objects with a finalizer - # and little else, the bounds in the (inc)minimark GC are not - # set up reasonably and the total memory usage grows 
without - # limit. - class B(object): - pass - b = B() - b.num_deleted = 0 - class A(object): - def __init__(self): - fq.register_finalizer(self) - class FQ(rgc.FinalizerQueue): - Class = A - def finalizer_trigger(self): - while True: - a = self.next_dead() - if a is None: - break - b.num_deleted += 1 - fq = FQ() - def f(x, y): - i = 0 - alive_max = 0 - while i < x: - i += 1 - a = A() - a.x = a.y = a.z = i - #print i - b.num_deleted, b.num_deleted - alive = i - b.num_deleted - assert alive >= 0 - alive_max = max(alive_max, alive) - return alive_max - res = self.interpret(f, [1000, 0]) - assert res < 50 - from rpython.rlib.objectmodel import UnboxedValue diff --git a/rpython/memory/test/test_minimark_gc.py b/rpython/memory/test/test_minimark_gc.py --- a/rpython/memory/test/test_minimark_gc.py +++ b/rpython/memory/test/test_minimark_gc.py @@ -1,3 +1,4 @@ +from rpython.rlib import rgc from rpython.rlib.rarithmetic import LONG_BIT from rpython.memory.test import test_semispace_gc @@ -9,3 +10,39 @@ GC_CAN_SHRINK_BIG_ARRAY = False GC_CAN_MALLOC_NONMOVABLE = True BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD + + def test_bounded_memory_when_allocating_with_finalizers(self): + # Issue #2590: when allocating a lot of objects with a finalizer + # and little else, the bounds in the (inc)minimark GC are not + # set up reasonably and the total memory usage grows without + # limit. 
+ class B(object): + pass + b = B() + b.num_deleted = 0 + class A(object): + def __init__(self): + fq.register_finalizer(self) + class FQ(rgc.FinalizerQueue): + Class = A + def finalizer_trigger(self): + while True: + a = self.next_dead() + if a is None: + break + b.num_deleted += 1 + fq = FQ() + def f(x, y): + i = 0 + alive_max = 0 + while i < x: + i += 1 + a = A() + a.x = a.y = a.z = i + #print i - b.num_deleted, b.num_deleted + alive = i - b.num_deleted + assert alive >= 0 + alive_max = max(alive_max, alive) + return alive_max + res = self.interpret(f, [1000, 0]) + assert res < 100 From pypy.commits at gmail.com Mon Jul 31 05:28:58 2017 From: pypy.commits at gmail.com (arigo) Date: Mon, 31 Jul 2017 02:28:58 -0700 (PDT) Subject: [pypy-commit] cffi default: Minor fix Message-ID: <597ef85a.a4a6df0a.75422.5bb0@mx.google.com> Author: Armin Rigo Branch: Changeset: r2997:5052e1026bbd Date: 2017-07-31 11:28 +0200 http://bitbucket.org/cffi/cffi/changeset/5052e1026bbd/ Log: Minor fix diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4737,9 +4737,9 @@ /* update the total alignment requirement, but skip it if the field is an anonymous bitfield or if SF_PACKED */ falignorg = get_alignment(ftype); + if (falignorg < 0) + goto error; falign = (sflags & SF_PACKED) ? 
1 : falignorg; - if (falign < 0) - goto error; do_align = 1; if (!(sflags & SF_GCC_ARM_BITFIELDS) && fbitsize >= 0) { From pypy.commits at gmail.com Mon Jul 31 05:41:08 2017 From: pypy.commits at gmail.com (arigo) Date: Mon, 31 Jul 2017 02:41:08 -0700 (PDT) Subject: [pypy-commit] pypy gc-del-limit-growth: Close branch, ready to merge Message-ID: <597efb34.10301c0a.14b97.1fae@mx.google.com> Author: Armin Rigo Branch: gc-del-limit-growth Changeset: r91994:f0335bf5364e Date: 2017-07-30 18:56 +0200 http://bitbucket.org/pypy/pypy/changeset/f0335bf5364e/ Log: Close branch, ready to merge From pypy.commits at gmail.com Mon Jul 31 05:41:11 2017 From: pypy.commits at gmail.com (arigo) Date: Mon, 31 Jul 2017 02:41:11 -0700 (PDT) Subject: [pypy-commit] pypy default: Fix for b43a6e2c0ea1: can't very reasonably use appexec() here, Message-ID: <597efb37.90aadf0a.b97e4.864e@mx.google.com> Author: Armin Rigo Branch: Changeset: r91996:abb9f6f4b003 Date: 2017-07-31 09:40 +0200 http://bitbucket.org/pypy/pypy/changeset/abb9f6f4b003/ Log: Fix for b43a6e2c0ea1: can't very reasonably use appexec() here, because it would create a single jit loop. If the logic is called with various types in the same program, we get a longer and longer jit loop keeping previous results alive. Fixed the same way than listobject.py's _do_extend_from_iterable. 
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -569,14 +569,43 @@ return list(buf.as_str()) return _from_byte_sequence(space, w_source) +def _get_printable_location(w_type): + return ('bytearray_from_byte_sequence [w_type=%s]' % + w_type.getname(w_type.space)) + +_byteseq_jitdriver = jit.JitDriver( + name='bytearray_from_byte_sequence', + greens=['w_type'], + reds=['w_iter', 'data'], + get_printable_location=_get_printable_location) + def _from_byte_sequence(space, w_source): # Split off in a separate function for the JIT's benefit - w_result = space.appexec([w_source], """(seq): - result = bytearray() - for i in seq: - result.append(i) - return result""") - return w_result.getdata() + # and add a jitdriver with the type of w_iter as the green key + w_iter = space.iter(w_source) + length_hint = space.length_hint(w_source, 0) + data = newlist_hint(length_hint) + # + _from_byte_sequence_loop(space, w_iter, data) + # + extended = len(data) + if extended < length_hint: + resizelist_hint(data, extended) + return data + +def _from_byte_sequence_loop(space, w_iter, data): + w_type = space.type(w_iter) + while True: + _byteseq_jitdriver.jit_merge_point(w_type=w_type, + w_iter=w_iter, + data=data) + try: + w_item = space.next(w_iter) + except OperationError as e: + if not e.match(space, space.w_StopIteration): + raise + break + data.append(space.byte_w(w_item)) def _hex_digit_to_int(d): diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -448,6 +448,13 @@ raises(TypeError, b.extend, [object()]) raises(TypeError, b.extend, u"unicode") + def test_extend_calls_len_or_lengthhint(self): + class BadLen(object): + def __iter__(self): return iter(range(10)) + def __len__(self): raise 
RuntimeError('hello') + b = bytearray() + raises(RuntimeError, b.extend, BadLen()) + def test_setitem_from_front(self): b = bytearray(b'abcdefghij') b[:2] = b'' From pypy.commits at gmail.com Mon Jul 31 05:41:09 2017 From: pypy.commits at gmail.com (arigo) Date: Mon, 31 Jul 2017 02:41:09 -0700 (PDT) Subject: [pypy-commit] pypy default: hg merge gc-del-limit-growth Message-ID: <597efb35.c98adf0a.b4cba.a0c5@mx.google.com> Author: Armin Rigo Branch: Changeset: r91995:44577e4653fa Date: 2017-07-30 18:58 +0200 http://bitbucket.org/pypy/pypy/changeset/44577e4653fa/ Log: hg merge gc-del-limit-growth Issue #2590: fix the bounds in the GC when allocating a lot of objects with finalizers diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2308,6 +2308,7 @@ ll_assert(not (self.probably_young_objects_with_finalizers .non_empty()), "probably_young_objects_with_finalizers should be empty") + self.kept_alive_by_finalizer = r_uint(0) if self.old_objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() elif self.old_objects_with_weakrefs.non_empty(): @@ -2380,6 +2381,9 @@ # we currently have -- but no more than 'max_delta' more than # we currently have. total_memory_used = float(self.get_total_memory_used()) + total_memory_used -= float(self.kept_alive_by_finalizer) + if total_memory_used < 0: + total_memory_used = 0 bounded = self.set_major_threshold_from( min(total_memory_used * self.major_collection_threshold, total_memory_used + self.max_delta), @@ -2418,7 +2422,7 @@ self.execute_finalizers() #END FINALIZING else: - pass #XXX which exception to raise here. Should be unreachable. 
+ ll_assert(False, "bogus gc_state") debug_print("stopping, now in gc state: ", GC_STATES[self.gc_state]) debug_stop("gc-collect-step") @@ -2784,8 +2788,17 @@ def _bump_finalization_state_from_0_to_1(self, obj): ll_assert(self._finalization_state(obj) == 0, "unexpected finalization state != 0") + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + self.get_size(obj) hdr = self.header(obj) hdr.tid |= GCFLAG_FINALIZATION_ORDERING + # A bit hackish, but we will not count these objects as "alive" + # for the purpose of computing when the next major GC should + # occur. This is done for issue #2590: without this, if we + # allocate mostly objects with finalizers, the + # next_major_collection_threshold grows forever and actual + # memory usage is not bounded. + self.kept_alive_by_finalizer += raw_malloc_usage(totalsize) def _recursively_bump_finalization_state_from_2_to_3(self, obj): ll_assert(self._finalization_state(obj) == 2, diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1636,6 +1636,7 @@ # with a finalizer and all objects reachable from there (and also # moves some objects from 'objects_with_finalizers' to # 'run_finalizers'). + self.kept_alive_by_finalizer = r_uint(0) if self.old_objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() # @@ -1678,6 +1679,9 @@ # we currently have -- but no more than 'max_delta' more than # we currently have. 
total_memory_used = float(self.get_total_memory_used()) + total_memory_used -= float(self.kept_alive_by_finalizer) + if total_memory_used < 0: + total_memory_used = 0 bounded = self.set_major_threshold_from( min(total_memory_used * self.major_collection_threshold, total_memory_used + self.max_delta), @@ -1999,8 +2003,11 @@ def _bump_finalization_state_from_0_to_1(self, obj): ll_assert(self._finalization_state(obj) == 0, "unexpected finalization state != 0") + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + self.get_size(obj) hdr = self.header(obj) hdr.tid |= GCFLAG_FINALIZATION_ORDERING + self.kept_alive_by_finalizer += raw_malloc_usage(totalsize) def _recursively_bump_finalization_state_from_2_to_3(self, obj): ll_assert(self._finalization_state(obj) == 2, diff --git a/rpython/memory/test/test_minimark_gc.py b/rpython/memory/test/test_minimark_gc.py --- a/rpython/memory/test/test_minimark_gc.py +++ b/rpython/memory/test/test_minimark_gc.py @@ -1,3 +1,4 @@ +from rpython.rlib import rgc from rpython.rlib.rarithmetic import LONG_BIT from rpython.memory.test import test_semispace_gc @@ -9,3 +10,39 @@ GC_CAN_SHRINK_BIG_ARRAY = False GC_CAN_MALLOC_NONMOVABLE = True BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD + + def test_bounded_memory_when_allocating_with_finalizers(self): + # Issue #2590: when allocating a lot of objects with a finalizer + # and little else, the bounds in the (inc)minimark GC are not + # set up reasonably and the total memory usage grows without + # limit. 
+ class B(object): + pass + b = B() + b.num_deleted = 0 + class A(object): + def __init__(self): + fq.register_finalizer(self) + class FQ(rgc.FinalizerQueue): + Class = A + def finalizer_trigger(self): + while True: + a = self.next_dead() + if a is None: + break + b.num_deleted += 1 + fq = FQ() + def f(x, y): + i = 0 + alive_max = 0 + while i < x: + i += 1 + a = A() + a.x = a.y = a.z = i + #print i - b.num_deleted, b.num_deleted + alive = i - b.num_deleted + assert alive >= 0 + alive_max = max(alive_max, alive) + return alive_max + res = self.interpret(f, [1000, 0]) + assert res < 100 From pypy.commits at gmail.com Mon Jul 31 07:55:19 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 31 Jul 2017 04:55:19 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Un-xfail a test; delete one that doesn't make sense on pypy3 Message-ID: <597f1aa7.0b471c0a.10538.74e5@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r91997:46c9f0976aad Date: 2017-07-31 12:54 +0100 http://bitbucket.org/pypy/pypy/changeset/46c9f0976aad/ Log: Un-xfail a test; delete one that doesn't make sense on pypy3 diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -90,7 +90,6 @@ assert not self.space.finalizer_queue.next_dead() - at pytest.mark.xfail(reason="Skipped until other tests in this file are unskipped") class AppTestApi(LeakCheckingTest): def setup_class(cls): from rpython.rlib.clibffi import get_libc_name @@ -112,12 +111,6 @@ def test_only_import(self): import cpyext - @pytest.mark.skipif(only_pypy, reason='pypy only test') - def test_load_error(self): - import cpyext - raises(ImportError, cpyext.load_module, "missing.file", "foo") - raises(ImportError, cpyext.load_module, self.libc, "invalid.function") - def test_dllhandle(self): import sys if sys.platform != "win32" or sys.version_info < (2, 6): From pypy.commits at gmail.com Mon Jul 31 08:14:12 2017 
From: pypy.commits at gmail.com (arigo) Date: Mon, 31 Jul 2017 05:14:12 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <597f1f14.82451c0a.9e07a.4dbf@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r91998:2ae11d0d2965 Date: 2017-07-31 14:13 +0200 http://bitbucket.org/pypy/pypy/changeset/2ae11d0d2965/ Log: hg merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,14 @@ .. this is a revision shortly after release-pypy2.7-v5.8.0 .. startrev: 558bd00b3dd8 +In previous versions of PyPy, ``instance.method`` would return always +the same bound method object, when gotten out of the same instance (as +far as ``is`` and ``id()`` can tell). CPython doesn't do that. Now +PyPy, like CPython, returns a different bound method object every time. +For ``type.method``, PyPy2 still returns always the same *unbound* +method object; CPython does it for built-in types but not for +user-defined types. + .. branch: cffi-complex .. 
branch: cffi-char16-char32 diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -2,8 +2,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import ( - compute_hash, compute_unique_id, import_from_mixin, newlist_hint, - resizelist_hint) + compute_hash, compute_unique_id, import_from_mixin) from rpython.rlib.rstring import StringBuilder from pypy.interpreter.baseobjspace import W_Root @@ -751,14 +750,40 @@ return _from_byte_sequence(space, w_source) +def _get_printable_location(w_type): + return ('bytearray_from_byte_sequence [w_type=%s]' % + w_type.getname(w_type.space)) + +_byteseq_jitdriver = jit.JitDriver( + name='bytearray_from_byte_sequence', + greens=['w_type'], + reds=['w_iter', 'builder'], + get_printable_location=_get_printable_location) + def _from_byte_sequence(space, w_source): # Split off in a separate function for the JIT's benefit - w_result = space.appexec([w_source], """(seq): - result = bytearray() - for i in seq: - result.append(i) - return result""") - return ''.join(w_result.getdata()) + # and add a jitdriver with the type of w_iter as the green key + w_iter = space.iter(w_source) + length_hint = space.length_hint(w_source, 0) + builder = StringBuilder(length_hint) + # + _from_byte_sequence_loop(space, w_iter, builder) + # + return builder.build() + +def _from_byte_sequence_loop(space, w_iter, builder): + w_type = space.type(w_iter) + while True: + _byteseq_jitdriver.jit_merge_point(w_type=w_type, + w_iter=w_iter, + builder=builder) + try: + w_item = space.next(w_iter) + except OperationError as e: + if not e.match(space, space.w_StopIteration): + raise + break + builder.append(space.byte_w(w_item)) W_BytesObject.typedef = TypeDef( "bytes", None, None, "read", diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ 
b/pypy/objspace/std/test/test_bytearrayobject.py @@ -466,6 +466,13 @@ raises(TypeError, b.extend, [object()]) raises(TypeError, b.extend, "unicode") + def test_extend_calls_len_or_lengthhint(self): + class BadLen(object): + def __iter__(self): return iter(range(10)) + def __len__(self): raise RuntimeError('hello') + b = bytearray() + raises(RuntimeError, b.extend, BadLen()) + def test_setitem_from_front(self): b = bytearray(b'abcdefghij') b[:2] = b'' diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2308,6 +2308,7 @@ ll_assert(not (self.probably_young_objects_with_finalizers .non_empty()), "probably_young_objects_with_finalizers should be empty") + self.kept_alive_by_finalizer = r_uint(0) if self.old_objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() elif self.old_objects_with_weakrefs.non_empty(): @@ -2380,6 +2381,9 @@ # we currently have -- but no more than 'max_delta' more than # we currently have. total_memory_used = float(self.get_total_memory_used()) + total_memory_used -= float(self.kept_alive_by_finalizer) + if total_memory_used < 0: + total_memory_used = 0 bounded = self.set_major_threshold_from( min(total_memory_used * self.major_collection_threshold, total_memory_used + self.max_delta), @@ -2418,7 +2422,7 @@ self.execute_finalizers() #END FINALIZING else: - pass #XXX which exception to raise here. Should be unreachable. 
+ ll_assert(False, "bogus gc_state") debug_print("stopping, now in gc state: ", GC_STATES[self.gc_state]) debug_stop("gc-collect-step") @@ -2784,8 +2788,17 @@ def _bump_finalization_state_from_0_to_1(self, obj): ll_assert(self._finalization_state(obj) == 0, "unexpected finalization state != 0") + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + self.get_size(obj) hdr = self.header(obj) hdr.tid |= GCFLAG_FINALIZATION_ORDERING + # A bit hackish, but we will not count these objects as "alive" + # for the purpose of computing when the next major GC should + # occur. This is done for issue #2590: without this, if we + # allocate mostly objects with finalizers, the + # next_major_collection_threshold grows forever and actual + # memory usage is not bounded. + self.kept_alive_by_finalizer += raw_malloc_usage(totalsize) def _recursively_bump_finalization_state_from_2_to_3(self, obj): ll_assert(self._finalization_state(obj) == 2, diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1636,6 +1636,7 @@ # with a finalizer and all objects reachable from there (and also # moves some objects from 'objects_with_finalizers' to # 'run_finalizers'). + self.kept_alive_by_finalizer = r_uint(0) if self.old_objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() # @@ -1678,6 +1679,9 @@ # we currently have -- but no more than 'max_delta' more than # we currently have. 
total_memory_used = float(self.get_total_memory_used()) + total_memory_used -= float(self.kept_alive_by_finalizer) + if total_memory_used < 0: + total_memory_used = 0 bounded = self.set_major_threshold_from( min(total_memory_used * self.major_collection_threshold, total_memory_used + self.max_delta), @@ -1999,8 +2003,11 @@ def _bump_finalization_state_from_0_to_1(self, obj): ll_assert(self._finalization_state(obj) == 0, "unexpected finalization state != 0") + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + self.get_size(obj) hdr = self.header(obj) hdr.tid |= GCFLAG_FINALIZATION_ORDERING + self.kept_alive_by_finalizer += raw_malloc_usage(totalsize) def _recursively_bump_finalization_state_from_2_to_3(self, obj): ll_assert(self._finalization_state(obj) == 2, diff --git a/rpython/memory/test/test_minimark_gc.py b/rpython/memory/test/test_minimark_gc.py --- a/rpython/memory/test/test_minimark_gc.py +++ b/rpython/memory/test/test_minimark_gc.py @@ -1,3 +1,4 @@ +from rpython.rlib import rgc from rpython.rlib.rarithmetic import LONG_BIT from rpython.memory.test import test_semispace_gc @@ -9,3 +10,39 @@ GC_CAN_SHRINK_BIG_ARRAY = False GC_CAN_MALLOC_NONMOVABLE = True BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD + + def test_bounded_memory_when_allocating_with_finalizers(self): + # Issue #2590: when allocating a lot of objects with a finalizer + # and little else, the bounds in the (inc)minimark GC are not + # set up reasonably and the total memory usage grows without + # limit. 
+ class B(object): + pass + b = B() + b.num_deleted = 0 + class A(object): + def __init__(self): + fq.register_finalizer(self) + class FQ(rgc.FinalizerQueue): + Class = A + def finalizer_trigger(self): + while True: + a = self.next_dead() + if a is None: + break + b.num_deleted += 1 + fq = FQ() + def f(x, y): + i = 0 + alive_max = 0 + while i < x: + i += 1 + a = A() + a.x = a.y = a.z = i + #print i - b.num_deleted, b.num_deleted + alive = i - b.num_deleted + assert alive >= 0 + alive_max = max(alive_max, alive) + return alive_max + res = self.interpret(f, [1000, 0]) + assert res < 100 From pypy.commits at gmail.com Mon Jul 31 09:46:22 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 31 Jul 2017 06:46:22 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Add more information when the leakfinder finds an error Message-ID: <597f34ae.cb141c0a.8c21e.5a8f@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r91999:e4f438ccf573 Date: 2017-07-31 14:19 +0100 http://bitbucket.org/pypy/pypy/changeset/e4f438ccf573/ Log: Add more information when the leakfinder finds an error diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -7,6 +7,8 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import lltype from pypy.module.cpyext import api +from pypy.module.cpyext.api import cts +from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.state import State from rpython.tool import leakfinder from rpython.rlib import rawrefcount @@ -82,8 +84,6 @@ return space.is_interned_str(s) def is_allowed_to_leak(space, obj): - from pypy.module.cpyext.pyobject import from_ref - from pypy.module.cpyext.api import cts from pypy.module.cpyext.methodobject import W_PyCFunctionObject try: w_obj = from_ref(space, cts.cast('PyObject*', obj._as_ptr())) @@ -95,6 +95,21 @@ # the test, but 
the w_obj is referred to from elsewhere. return is_interned_string(space, w_obj) +def _get_w_obj(space, c_obj): + return from_ref(space, cts.cast('PyObject*', c_obj._as_ptr())) + +class CpyextLeak(leakfinder.MallocMismatch): + def __str__(self): + lines = [leakfinder.MallocMismatch.__str__(self), ''] + lines.append( + "These objects are attached to the following W_Root objects:") + for c_obj in self.args[0]: + try: + lines.append(" %s" % (_get_w_obj(self.args[1], c_obj),)) + except: + pass + return '\n'.join(lines) + class LeakCheckingTest(object): """Base class for all cpyext tests.""" @@ -116,7 +131,7 @@ if not is_allowed_to_leak(self.space, obj): filtered_result[obj] = value if filtered_result: - raise leakfinder.MallocMismatch(filtered_result) + raise CpyextLeak(filtered_result, self.space) assert not self.space.finalizer_queue.next_dead() From pypy.commits at gmail.com Mon Jul 31 11:54:54 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 31 Jul 2017 08:54:54 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Preload a few more builtin types Message-ID: <597f52ce.8baddf0a.c05b8.3b31@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r92000:466144c090c7 Date: 2017-07-31 15:46 +0100 http://bitbucket.org/pypy/pypy/changeset/466144c090c7/ Log: Preload a few more builtin types diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -209,7 +209,10 @@ # 'import os' to warm up reference counts w_import = space.builtin.getdictvalue(space, '__import__') space.call_function(w_import, space.wrap("os")) - for name in ['buffer', 'mmap.mmap']: + for name in [ + 'buffer', 'mmap.mmap', + 'types.FunctionType', 'types.CodeType', + 'types.TracebackType', 'types.FrameType']: preload(space, name) for expr in ['type(str.join)']: preload_expr(space, expr) From pypy.commits at gmail.com Mon Jul 31 11:54:56 2017 From: 
pypy.commits at gmail.com (rlamy) Date: Mon, 31 Jul 2017 08:54:56 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: fix refleaks in test_tupleobject Message-ID: <597f52d0.c7ac1c0a.a4532.289d@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r92001:d4f923fa8dfa Date: 2017-07-31 16:54 +0100 http://bitbucket.org/pypy/pypy/changeset/d4f923fa8dfa/ Log: fix refleaks in test_tupleobject diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -24,6 +24,7 @@ def test_tuple_realize_refuses_nulls(self, space, api): py_tuple = api.PyTuple_New(1) py.test.raises(FatalError, from_ref, space, py_tuple) + api.Py_DecRef(py_tuple) def test_tuple_resize(self, space, api): w_42 = space.wrap(42) @@ -70,6 +71,7 @@ w_tuple = from_ref(space, py_tuple) assert space.eq_w(w_tuple, space.newtuple([space.wrap(42), space.wrap(43)])) + api.Py_DecRef(py_tuple) def test_getslice(self, space, api): w_tuple = space.newtuple([space.wrap(i) for i in range(10)]) @@ -174,6 +176,7 @@ res = PyTuple_SetItem(tuple, 0, one); if (res != 0) { + Py_DECREF(one); Py_DECREF(tuple); return NULL; } @@ -187,14 +190,13 @@ /* Do something that uses the tuple, but does not incref */ t2 = PyTuple_GetSlice(tuple, 0, 1); Py_DECREF(t2); - Py_INCREF(one); res = PyTuple_SetItem(tuple, 0, one); - Py_DECREF(tuple); if (res != 0) { - Py_DECREF(one); + Py_DECREF(tuple); return NULL; } + Py_DECREF(tuple); Py_INCREF(Py_None); return Py_None; """), @@ -205,4 +207,3 @@ raises(SystemError, module.set_after_use, s) else: module.set_after_use(s) - diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -143,6 +143,7 @@ old_ref = tupleobj.c_ob_item[index] if pyobj_has_w_obj(ref): # similar but not quite equal to ref.c_ob_refcnt != 1 on CPython + 
decref(space, py_obj) raise oefmt(space.w_SystemError, "PyTuple_SetItem called on tuple after" " use of tuple") tupleobj.c_ob_item[index] = py_obj # consumes a reference From pypy.commits at gmail.com Mon Jul 31 15:00:44 2017 From: pypy.commits at gmail.com (exarkun) Date: Mon, 31 Jul 2017 12:00:44 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: encode the type name to bytes for interpolation Message-ID: <597f7e5c.6ea0df0a.110b4.b6bf@mx.google.com> Author: Jean-Paul Calderone Branch: py3.5 Changeset: r92002:22c27ed7a494 Date: 2017-07-31 14:59 -0400 http://bitbucket.org/pypy/pypy/changeset/22c27ed7a494/ Log: encode the type name to bytes for interpolation fixes annotator error: string formatting mixing strings and unicode not supported diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -752,7 +752,7 @@ def _get_printable_location(w_type): return ('bytearray_from_byte_sequence [w_type=%s]' % - w_type.getname(w_type.space)) + w_type.getname(w_type.space).encode('utf-8')) _byteseq_jitdriver = jit.JitDriver( name='bytearray_from_byte_sequence', From pypy.commits at gmail.com Mon Jul 31 15:59:07 2017 From: pypy.commits at gmail.com (mattip) Date: Mon, 31 Jul 2017 12:59:07 -0700 (PDT) Subject: [pypy-commit] pypy default: document merged branches Message-ID: <597f8c0b.c896df0a.c3871.260b@mx.google.com> Author: Matti Picus Branch: Changeset: r92003:97134fb162c0 Date: 2017-07-31 22:57 +0300 http://bitbucket.org/pypy/pypy/changeset/97134fb162c0/ Log: document merged branches diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -38,3 +38,18 @@ Renaming of ``cppyy`` to ``_cppyy``. The former is now an external package installable with ``pip install cppyy``. + +.. branch: Enable_PGO_for_clang + +.. branch: nopax + +Deleted ``--nopax`` option + +.. 
branch: pypy_bytearray + +Improve ``bytearray`` performance (backported from py3.5) + +.. branch: gc-del-limit-growth + +Fix the bounds in the GC when allocating a lot of objects with finalizers, +fixes issue #2590 From pypy.commits at gmail.com Mon Jul 31 16:58:28 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 31 Jul 2017 13:58:28 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: Create builtin pyobjs early for interp-level tests as well Message-ID: <597f99f4.51711c0a.62c96.fc18@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r92004:ee6ee454da80 Date: 2017-07-31 21:50 +0100 http://bitbucket.org/pypy/pypy/changeset/ee6ee454da80/ Log: Create builtin pyobjs early for interp-level tests as well diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -6,7 +6,8 @@ from pypy.module.cpyext.api import ( slot_function, cpython_api, copy_header_files, INTERPLEVEL_API, Py_ssize_t, Py_ssize_tP, PyObject, cts) -from pypy.module.cpyext.test.test_cpyext import freeze_refcnts, LeakCheckingTest +from pypy.module.cpyext.test.test_cpyext import ( + freeze_refcnts, LeakCheckingTest) from pypy.interpreter.error import OperationError from rpython.rlib import rawrefcount import os @@ -32,6 +33,7 @@ space.call_function(space.getattr(space.sys.get("stdout"), space.wrap("write")), space.wrap("")) + cls.preload_builtins(space) class CAPI: def __getattr__(self, name): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -76,6 +76,23 @@ def freeze_refcnts(self): rawrefcount._dont_free_any_more() +def preload(space, name): + from pypy.module.cpyext.pyobject import make_ref + if '.' 
not in name: + w_obj = space.builtin.getdictvalue(space, name) + else: + module, localname = name.rsplit('.', 1) + code = "(): import {module}; return {module}.{localname}" + code = code.format(**locals()) + w_obj = space.appexec([], code) + make_ref(space, w_obj) + +def preload_expr(space, expr): + from pypy.module.cpyext.pyobject import make_ref + code = "(): return {}".format(expr) + w_obj = space.appexec([], code) + make_ref(space, w_obj) + def is_interned_string(space, w_obj): try: s = space.str_w(w_obj) @@ -118,6 +135,24 @@ 'micronumpy', 'mmap' ]) + @classmethod + def preload_builtins(cls, space): + """ + Eagerly create pyobjs for various builtins so they don't look like + leaks. + """ + space.getbuiltinmodule("cpyext") + # 'import os' to warm up reference counts + w_import = space.builtin.getdictvalue(space, '__import__') + space.call_function(w_import, space.wrap("os")) + for name in [ + 'buffer', 'mmap.mmap', + 'types.FunctionType', 'types.CodeType', + 'types.TracebackType', 'types.FrameType']: + preload(space, name) + for expr in ['type(str.join)']: + preload_expr(space, expr) + def cleanup(self): self.space.getexecutioncontext().cleanup_cpyext_state() rawrefcount._collect() @@ -178,23 +213,6 @@ def debug_collect(space): rawrefcount._collect() -def preload(space, name): - from pypy.module.cpyext.pyobject import make_ref - if '.' 
not in name: - w_obj = space.builtin.getdictvalue(space, name) - else: - module, localname = name.rsplit('.', 1) - code = "(): import {module}; return {module}.{localname}" - code = code.format(**locals()) - w_obj = space.appexec([], code) - make_ref(space, w_obj) - -def preload_expr(space, expr): - from pypy.module.cpyext.pyobject import make_ref - code = "(): return {}".format(expr) - w_obj = space.appexec([], code) - make_ref(space, w_obj) - class AppTestCpythonExtensionBase(LeakCheckingTest): @@ -205,20 +223,8 @@ cls.w_runappdirect = space.wrap(cls.runappdirect) if not cls.runappdirect: cls.sys_info = get_cpyext_info(space) - space.getbuiltinmodule("cpyext") - # 'import os' to warm up reference counts - w_import = space.builtin.getdictvalue(space, '__import__') - space.call_function(w_import, space.wrap("os")) - for name in [ - 'buffer', 'mmap.mmap', - 'types.FunctionType', 'types.CodeType', - 'types.TracebackType', 'types.FrameType']: - preload(space, name) - for expr in ['type(str.join)']: - preload_expr(space, expr) - #state = cls.space.fromcache(RefcountState) ZZZ - #state.non_heaptypes_w[:] = [] cls.w_debug_collect = space.wrap(interp2app(debug_collect)) + cls.preload_builtins(space) else: def w_import_module(self, name, init=None, body='', filename=None, include_dirs=None, PY_SSIZE_T_CLEAN=False): From pypy.commits at gmail.com Mon Jul 31 16:58:30 2017 From: pypy.commits at gmail.com (rlamy) Date: Mon, 31 Jul 2017 13:58:30 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-leakchecking: fix pseudo-leak in test_traceback.py Message-ID: <597f99f6.82251c0a.8b11f.b231@mx.google.com> Author: Ronan Lamy Branch: cpyext-leakchecking Changeset: r92005:5e01a8ace2de Date: 2017-07-31 21:56 +0100 http://bitbucket.org/pypy/pypy/changeset/5e01a8ace2de/ Log: fix pseudo-leak in test_traceback.py diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py --- a/pypy/module/cpyext/test/test_traceback.py +++ 
b/pypy/module/cpyext/test/test_traceback.py @@ -3,17 +3,19 @@ from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.pytraceback import PyTracebackObject from pypy.interpreter.pytraceback import PyTraceback -from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.baseobjspace import AppExecCache class TestPyTracebackObject(BaseApiTest): def test_traceback(self, space, api): - w_traceback = space.appexec([], """(): + src = """(): import sys try: 1/0 except: return sys.exc_info()[2] - """) + """ + w_traceback = space.appexec([], src) + py_obj = make_ref(space, w_traceback) py_traceback = rffi.cast(PyTracebackObject, py_obj) assert (from_ref(space, rffi.cast(PyObject, py_traceback.c_ob_type)) is @@ -38,3 +40,5 @@ assert lltype.normalizeptr(py_traceback) is None api.Py_DecRef(py_obj) + # hack to allow the code object to be freed + del space.fromcache(AppExecCache).content[src] From pypy.commits at gmail.com Mon Jul 31 19:06:16 2017 From: pypy.commits at gmail.com (wlav) Date: Mon, 31 Jul 2017 16:06:16 -0700 (PDT) Subject: [pypy-commit] pypy default: remove leftover test_cint.py (and higher directories) Message-ID: <597fb7e8.091f1c0a.7becb.a45c@mx.google.com> Author: Wim Lavrijsen Branch: Changeset: r92006:edf12627beaf Date: 2017-07-31 15:52 -0700 http://bitbucket.org/pypy/pypy/changeset/edf12627beaf/ Log: remove leftover test_cint.py (and higher directories) diff --git a/pypy/module/cppyy/test/test_cint.py b/pypy/module/cppyy/test/test_cint.py deleted file mode 100644 --- a/pypy/module/cppyy/test/test_cint.py +++ /dev/null @@ -1,710 +0,0 @@ -import py, os, sys - -# These tests are for the CINT backend only (they exercise ROOT features -# and classes that are not loaded/available with the Reflex backend). At -# some point, these tests are likely covered by the CLang/LLVM backend. 
-from pypy.module.cppyy import capi -if capi.identify() != 'CINT': - py.test.skip("backend-specific: CINT-only tests") - -# load _cffi_backend early, or its global vars are counted as leaks in the -# test (note that the module is not otherwise used in the test itself) -from pypy.module._cffi_backend import newtype - -currpath = py.path.local(__file__).dirpath() -iotypes_dct = str(currpath.join("iotypesDict.so")) - -def setup_module(mod): - if sys.platform == 'win32': - py.test.skip("win32 not supported so far") - err = os.system("cd '%s' && make CINT=t iotypesDict.so" % currpath) - if err: - raise OSError("'make' failed (see stderr)") - -class AppTestCINT: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) - - def test01_globals(self): - """Test the availability of ROOT globals""" - - import cppyy - - assert cppyy.gbl.gROOT - assert cppyy.gbl.gApplication - assert cppyy.gbl.gSystem - assert cppyy.gbl.TInterpreter.Instance() # compiled - assert cppyy.gbl.TInterpreter # interpreted - assert cppyy.gbl.TDirectory.CurrentDirectory() # compiled - assert cppyy.gbl.TDirectory # interpreted - - def test02_write_access_to_globals(self): - """Test overwritability of ROOT globals""" - - import cppyy - - oldval = cppyy.gbl.gDebug - assert oldval != 3 - - proxy = cppyy.gbl.__class__.__dict__['gDebug'] - cppyy.gbl.gDebug = 3 - assert proxy.__get__(proxy, None) == 3 - - # this is where this test differs from test03_write_access_to_globals - # in test_pythonify.py - cppyy.gbl.gROOT.ProcessLine('int gDebugCopy = gDebug;') - assert cppyy.gbl.gDebugCopy == 3 - - cppyy.gbl.gDebug = oldval - - def test03_create_access_to_globals(self): - """Test creation and access of new ROOT globals""" - - import cppyy - - cppyy.gbl.gROOT.ProcessLine('double gMyOwnGlobal = 3.1415') - assert cppyy.gbl.gMyOwnGlobal == 3.1415 - - proxy = cppyy.gbl.__class__.__dict__['gMyOwnGlobal'] - assert proxy.__get__(proxy, None) == 3.1415 - - def test04_auto_loading(self): - """Test auto-loading by 
retrieving a non-preloaded class""" - - import cppyy - - l = cppyy.gbl.TLorentzVector() - assert isinstance(l, cppyy.gbl.TLorentzVector) - - def test05_macro_loading(self): - """Test accessibility to macro classes""" - - import cppyy - - loadres = cppyy.gbl.gROOT.LoadMacro('simple_class.C') - assert loadres == 0 - - base = cppyy.gbl.MySimpleBase - simple = cppyy.gbl.MySimpleDerived - simple_t = cppyy.gbl.MySimpleDerived_t - - assert issubclass(simple, base) - assert simple is simple_t - - c = simple() - assert isinstance(c, simple) - assert c.m_data == c.get_data() - - c.set_data(13) - assert c.m_data == 13 - assert c.get_data() == 13 - - -class AppTestCINTPYTHONIZATIONS: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) - - def test01_strings(self): - """Test TString/TObjString compatibility""" - - import cppyy - - pyteststr = "aap noot mies" - def test_string(s1, s2): - assert len(s1) == len(s2) - assert s1 == s1 - assert s1 == s2 - assert s1 == str(s1) - assert s1 == pyteststr - assert s1 != "aap" - assert s1 != "" - assert s1 < "noot" - assert repr(s1) == repr(s2) - - s1 = cppyy.gbl.TString(pyteststr) - test_string(s1, pyteststr) - - s3 = cppyy.gbl.TObjString(pyteststr) - test_string(s3, pyteststr) - - def test03_TVector(self): - """Test TVector2/3/T behavior""" - - import cppyy, math - - N = 51 - - # TVectorF is a typedef of floats - v = cppyy.gbl.TVectorF(N) - for i in range(N): - v[i] = i*i - - assert len(v) == N - for j in v: - assert round(v[int(math.sqrt(j)+0.5)]-j, 5) == 0. 
- - def test04_TStringTObjString(self): - """Test string/TString interchangebility""" - - import cppyy - - test = "aap noot mies" - - s1 = cppyy.gbl.TString(test ) - s2 = str(s1) - - assert s1 == test - assert test == s2 - assert s1 == s2 - - s3 = cppyy.gbl.TObjString(s2) - assert s3 == test - assert s2 == s3 - - # force use of: TNamed(const TString &name, const TString &title) - n = cppyy.gbl.TNamed(test, cppyy.gbl.TString("title")) - assert n.GetTitle() == "title" - assert n.GetName() == test - - -class AppTestCINTTTREE: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) - - def setup_class(cls): - cls.w_N = cls.space.newint(5) - cls.w_M = cls.space.newint(10) - cls.w_fname = cls.space.newtext("test.root") - cls.w_tname = cls.space.newtext("test") - cls.w_title = cls.space.newtext("test tree") - cls.w_iotypes = cls.space.appexec([], """(): - import cppyy - return cppyy.load_reflection_info(%r)""" % (iotypes_dct,)) - - def test01_write_stdvector(self): - """Test writing of a single branched TTree with an std::vector""" - - from cppyy import gbl # bootstraps, only needed for tests - from cppyy.gbl import TFile, TTree - from cppyy.gbl.std import vector - - f = TFile(self.fname, "RECREATE") - mytree = TTree(self.tname, self.title) - mytree._python_owns = False - - v = vector("double")() - raises(TypeError, TTree.Branch, None, "mydata", v.__class__.__name__, v) - raises(TypeError, TTree.Branch, v, "mydata", v.__class__.__name__, v) - - mytree.Branch("mydata", v.__class__.__name__, v) - - for i in range(self.N): - for j in range(self.M): - v.push_back(i*self.M+j) - mytree.Fill() - v.clear() - f.Write() - f.Close() - - def test02_file_open(self): - - from cppyy import gbl - - f = gbl.TFile.Open(self.fname) - s = str(f) # should not raise - r = repr(f) - - f.Close() - - def test03_read_stdvector(self): - """Test reading of a single branched TTree with an std::vector""" - - from cppyy import gbl - from cppyy.gbl import TFile - - f = TFile(self.fname) - 
mytree = f.Get(self.tname) - - i = 0 - for event in mytree: - assert len(event.mydata) == self.M - for entry in event.mydata: - assert i == int(entry) - i += 1 - assert i == self.N * self.M - - f.Close() - - def test04_write_some_data_object(self): - """Test writing of a complex data object""" - - from cppyy import gbl - from cppyy.gbl import TFile, TTree, IO - from cppyy.gbl.IO import SomeDataObject - - f = TFile(self.fname, "RECREATE") - mytree = TTree(self.tname, self.title) - - d = SomeDataObject() - b = mytree.Branch("data", d) - mytree._python_owns = False - assert b - - for i in range(self.N): - for j in range(self.M): - d.add_float(i*self.M+j) - d.add_tuple(d.get_floats()) - - mytree.Fill() - - f.Write() - f.Close() - - def test05_read_some_data_object(self): - """Test reading of a complex data object""" - - from cppyy import gbl - from cppyy.gbl import TFile - - f = TFile(self.fname) - mytree = f.Get(self.tname) - - j = 1 - for event in mytree: - i = 0 - assert len(event.data.get_floats()) == j*self.M - for entry in event.data.get_floats(): - assert i == int(entry) - i += 1 - - k = 1 - assert len(event.data.get_tuples()) == j - for mytuple in event.data.get_tuples(): - i = 0 - assert len(mytuple) == k*self.M - for entry in mytuple: - assert i == int(entry) - i += 1 - k += 1 - j += 1 - assert j-1 == self.N - # - f.Close() - - def test06_branch_activation(self): - """Test of automatic branch activation""" - - from cppyy import gbl - from cppyy.gbl import TFile, TTree - from cppyy.gbl.std import vector - - L = 5 - - # writing - f = TFile(self.fname, "RECREATE") - mytree = TTree(self.tname, self.title) - mytree._python_owns = False - - for i in range(L): - v = vector("double")() - mytree.Branch("mydata_%d"%i, v.__class__.__name__, v) - mytree.__dict__["v_%d"%i] = v - - for i in range(self.N): - for k in range(L): - v = mytree.__dict__["v_%d"%k] - for j in range(self.M): - mytree.__dict__["v_%d"%k].push_back(i*self.M+j*L+k) - mytree.Fill() - for k in range(L): 
- v = mytree.__dict__["v_%d"%k] - v.clear() - f.Write() - f.Close() - - del mytree, f - import gc - gc.collect() - - # reading - f = TFile(self.fname) - mytree = f.Get(self.tname) - - # force (initial) disabling of all branches - mytree.SetBranchStatus("*",0); - - i = 0 - for event in mytree: - for k in range(L): - j = 0 - data = getattr(mytree, "mydata_%d"%k) - assert len(data) == self.M - for entry in data: - assert entry == i*self.M+j*L+k - j += 1 - assert j == self.M - i += 1 - assert i == self.N - - f.Close() - - def test07_write_builtin(self): - """Test writing of builtins""" - - from cppyy import gbl # bootstraps, only needed for tests - from cppyy.gbl import TFile, TTree - from cppyy.gbl.std import vector - - f = TFile(self.fname, "RECREATE") - mytree = TTree(self.tname, self.title) - mytree._python_owns = False - - import array - mytree.ba = array.array('c', [chr(0)]) - mytree.ia = array.array('i', [0]) - mytree.da = array.array('d', [0.]) - - mytree.Branch("my_bool", mytree.ba, "my_bool/O") - mytree.Branch("my_int", mytree.ia, "my_int/I") - mytree.Branch("my_int2", mytree.ia, "my_int2/I") - mytree.Branch("my_double", mytree.da, "my_double/D") - - for i in range(self.N): - # make sure value is different from default (0) - mytree.ba[0] = i%2 and chr(0) or chr(1) - mytree.ia[0] = i+1 - mytree.da[0] = (i+1)/2. - mytree.Fill() - f.Write() - f.Close() - - def test08_read_builtin(self): - """Test reading of builtins""" - - from cppyy import gbl - from cppyy.gbl import TFile - - f = TFile(self.fname) - mytree = f.Get(self.tname) - - raises(AttributeError, getattr, mytree, "does_not_exist") - - i = 1 - for event in mytree: - assert event.my_bool == (i-1)%2 and 0 or 1 - assert event.my_int == i - assert event.my_double == i/2. 
- i += 1 - assert (i-1) == self.N - - f.Close() - - def test09_user_read_builtin(self): - """Test user-directed reading of builtins""" - - from cppyy import gbl - from cppyy.gbl import TFile - - f = TFile(self.fname) - mytree = f.Get(self.tname) - - # note, this is an old, annoted tree from test08 - for i in range(3, mytree.GetEntriesFast()): - mytree.GetEntry(i) - assert mytree.my_int == i+1 - assert mytree.my_int2 == i+1 - - f.Close() - -class AppTestCINTREGRESSION: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) - - # these are tests that at some point in the past resulted in failures on - # PyROOT; kept here to confirm no regression from PyROOT - - def test01_regression(self): - """TPaveText::AddText() used to result in KeyError""" - - # This is where the original problem was discovered, and the test is - # left in. However, the detailed underlying problem, as well as the - # solution to it, is tested in test_fragile.py - - from cppyy import gbl - from cppyy.gbl import TPaveText - - hello = TPaveText( .1, .8, .9, .97 ) - hello.AddText( 'Hello, World!' ) - - -class AppTestCINTFUNCTION: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) - _pypytest_leaks = None # TODO: figure out the false positives - - # test the function callbacks; this does not work with Reflex, as it can - # not generate functions on the fly (it might with cffi?) - - @py.test.mark.dont_track_allocations("TODO: understand; initialization left-over?") - def test01_global_function_callback(self): - """Test callback of a python global function""" - - import cppyy, gc - TF1 = cppyy.gbl.TF1 - - def identity(x): - return x[0] - - f = TF1("pyf1", identity, -1., 1., 0) - - assert f.Eval(0.5) == 0.5 - assert f.Eval(-10.) == -10. - assert f.Eval(1.0) == 1.0 - - # check proper propagation of default value - f = TF1("pyf1d", identity, -1., 1.) 
- - assert f.Eval(0.5) == 0.5 - - del f # force here, to prevent leak-check complaints - gc.collect() - - def test02_callable_object_callback(self): - """Test callback of a python callable object""" - - import cppyy, gc - TF1 = cppyy.gbl.TF1 - - class Linear: - def __call__(self, x, par): - return par[0] + x[0]*par[1] - - f = TF1("pyf2", Linear(), -1., 1., 2) - f.SetParameters(5., 2.) - - assert f.Eval(-0.1) == 4.8 - assert f.Eval(1.3) == 7.6 - - del f # force here, to prevent leak-check complaints - gc.collect() - - def test03_fit_with_python_gaussian(self): - """Test fitting with a python global function""" - - # note: this function is dread-fully slow when running testing un-translated - - import cppyy, gc, math - TF1, TH1F = cppyy.gbl.TF1, cppyy.gbl.TH1F - - def pygaus(x, par): - arg1 = 0 - scale1 = 0 - ddx = 0.01 - - if (par[2] != 0.0): - arg1 = (x[0]-par[1])/par[2] - scale1 = (ddx*0.39894228)/par[2] - h1 = par[0]/(1+par[3]) - - gauss = h1*scale1*math.exp(-0.5*arg1*arg1) - else: - gauss = 0. - return gauss - - f = TF1("pygaus", pygaus, -4, 4, 4) - f.SetParameters(600, 0.43, 0.35, 600) - - h = TH1F("h", "test", 100, -4, 4) - h.FillRandom("gaus", 200000) - h.Fit(f, "0Q") - - assert f.GetNDF() == 96 - result = f.GetParameters() - assert round(result[1] - 0., 1) == 0 # mean - assert round(result[2] - 1., 1) == 0 # s.d. 
- - del f # force here, to prevent leak-check complaints - gc.collect() - - -class AppTestSURPLUS: - spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) - - # these are tests that were historically exercised on ROOT classes and - # have twins on custom classes; kept here just in case differences crop - # up between the ROOT classes and the custom ones - - def test01_class_enum(self): - """Test class enum access and values""" - - import cppyy - TObject = cppyy.gbl.TObject - gROOT = cppyy.gbl.gROOT - - assert TObject.kBitMask == gROOT.ProcessLine("return TObject::kBitMask;") - assert TObject.kIsOnHeap == gROOT.ProcessLine("return TObject::kIsOnHeap;") - assert TObject.kNotDeleted == gROOT.ProcessLine("return TObject::kNotDeleted;") - assert TObject.kZombie == gROOT.ProcessLine("return TObject::kZombie;") - - t = TObject() - - assert TObject.kBitMask == t.kBitMask - assert TObject.kIsOnHeap == t.kIsOnHeap - assert TObject.kNotDeleted == t.kNotDeleted - assert TObject.kZombie == t.kZombie - - def test02_global_enum(self): - """Test global enums access and values""" - - import cppyy - from cppyy import gbl - - assert gbl.kRed == gbl.gROOT.ProcessLine("return kRed;") - assert gbl.kGreen == gbl.gROOT.ProcessLine("return kGreen;") - assert gbl.kBlue == gbl.gROOT.ProcessLine("return kBlue;") - - def test03_copy_contructor(self): - """Test copy constructor""" - - import cppyy - TLorentzVector = cppyy.gbl.TLorentzVector - - t1 = TLorentzVector(1., 2., 3., -4.) - t2 = TLorentzVector(0., 0., 0., 0.) 
- t3 = TLorentzVector(t1) - - assert t1 == t3 - assert t1 != t2 - - for i in range(4): - assert t1[i] == t3[i] - - def test04_object_validity(self): - """Test object validity checking""" - - import cppyy - - t1 = cppyy.gbl.TObject() - - assert t1 - assert not not t1 - - t2 = cppyy.gbl.gROOT.FindObject("Nah, I don't exist") - - assert not t2 - - def test05_element_access(self): - """Test access to elements in matrix and array objects.""" - - from cppyy import gbl - - N = 3 - v = gbl.TVectorF(N) - m = gbl.TMatrixD(N, N) - - for i in range(N): - assert v[i] == 0.0 - - for j in range(N): - assert m[i][j] == 0.0 - - def test06_static_function_call( self ): - """Test call to static function.""" - - import cppyy - TROOT, gROOT = cppyy.gbl.TROOT, cppyy.gbl.gROOT - - c1 = TROOT.Class() - assert not not c1 - - c2 = gROOT.Class() - - assert c1 == c2 - - old = gROOT.GetDirLevel() - TROOT.SetDirLevel(2) - assert 2 == gROOT.GetDirLevel() - gROOT.SetDirLevel(old) - - old = TROOT.GetDirLevel() - gROOT.SetDirLevel(3) - assert 3 == TROOT.GetDirLevel() - TROOT.SetDirLevel(old) - - def test07_macro(self): - """Test access to cpp macro's""" - - from cppyy import gbl - - assert gbl.NULL == 0 - - gbl.gROOT.ProcessLine('#define aap "aap"') - gbl.gROOT.ProcessLine('#define noot 1') - gbl.gROOT.ProcessLine('#define mies 2.0') - - # TODO: macro's assumed to always be of long type ... 
- #assert gbl.aap == "aap" - assert gbl.noot == 1 - #assert gbl.mies == 2.0 - - def test08_opaque_pointer_passing(self): - """Test passing around of opaque pointers""" - - import cppyy - - # TODO: figure out CObject (see also test_advanced.py) - - s = cppyy.gbl.TString("Hello World!") - #cobj = cppyy.as_cobject(s) - addr = cppyy.addressof(s) - - #assert s == cppyy.bind_object(cobj, s.__class__) - #assert s == cppyy.bind_object(cobj, "TString") - assert s == cppyy.bind_object(addr, s.__class__) - assert s == cppyy.bind_object(addr, "TString") - - def test09_object_and_pointer_comparisons(self): - """Verify object and pointer comparisons""" - - import cppyy - gbl = cppyy.gbl - - c1 = cppyy.bind_object(0, gbl.TCanvas) - assert c1 == None - assert None == c1 - - c2 = cppyy.bind_object(0, gbl.TCanvas) - assert c1 == c2 - assert c2 == c1 - - # TLorentzVector overrides operator== - l1 = cppyy.bind_object(0, gbl.TLorentzVector) - assert l1 == None - assert None == l1 - - assert c1 != l1 - assert l1 != c1 - - l2 = cppyy.bind_object(0, gbl.TLorentzVector) - assert l1 == l2 - assert l2 == l1 - - l3 = gbl.TLorentzVector(1, 2, 3, 4) - l4 = gbl.TLorentzVector(1, 2, 3, 4) - l5 = gbl.TLorentzVector(4, 3, 2, 1) - assert l3 == l4 - assert l4 == l3 - - assert l3 != None # like this to ensure __ne__ is called - assert None != l3 # id. 
- assert l3 != l5 - assert l5 != l3 - - def test10_recursive_remove(self): - """Verify that objects are recursively removed when destroyed""" - - import cppyy - - c = cppyy.gbl.TClass.GetClass("TObject") - - o = cppyy.gbl.TObject() - assert o - - o.SetBit(cppyy.gbl.TObject.kMustCleanup) - c.Destructor(o) - assert not o From pypy.commits at gmail.com Mon Jul 31 19:22:56 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:22:56 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Set exponential base to two, i.e., double trx length on commit Message-ID: <597fbbd0.07a5df0a.2f446.5682@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2130:dba308a7d960 Date: 2017-07-17 16:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/dba308a7d960/ Log: Set exponential base to two, i.e., double trx length on commit diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -18,23 +18,24 @@ #define DEFAULT_FILL_MARK_NURSERY_BYTES (NURSERY_SIZE / 4) // #define LARGE_FILL_MARK_NURSERY_BYTES DEFAULT_FILL_MARK_NURSERY_BYTES -#define LARGE_FILL_MARK_NURSERY_BYTES 0x10000000L +#define LARGE_FILL_MARK_NURSERY_BYTES 0x100000000L // #define LARGE_FILL_MARK_NURSERY_BYTES 0x1000000000000000L -// corresponds to ~270 bytes nursery fill -#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000001) -#define BACKOFF_MULTIPLIER (20 / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) +// corresponds to ~430 bytes nursery fill +#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.0000001) +#define BACKOFF_COUNT (20) +#define BACKOFF_MULTIPLIER (BACKOFF_COUNT / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) static inline void set_backoff(stm_thread_local_t *tl, double rel_trx_len) { // the shorter the trx, the more backoff: 100 at min trx length, proportional decrease to 5 at max trx length (think a/x + b = backoff) tl->transaction_length_backoff = (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + 5); // printf("thread 
%d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); - tl->linear_transaction_length_increment = rel_trx_len; + tl->linear_transaction_length_increment = rel_trx_len / BACKOFF_COUNT; } static inline double get_new_transaction_length(stm_thread_local_t *tl, bool aborts) { - const int multiplier = 100; + const int multiplier = 2; double previous = tl->relative_transaction_length; double new = previous; if (aborts) { From pypy.commits at gmail.com Mon Jul 31 19:22:58 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:22:58 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Increase min trx length and fix capping trx length Message-ID: <597fbbd2.4f821c0a.f850e.cd15@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2131:d86628e2626c Date: 2017-07-20 20:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/d86628e2626c/ Log: Increase min trx length and fix capping trx length diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -21,9 +21,9 @@ #define LARGE_FILL_MARK_NURSERY_BYTES 0x100000000L // #define LARGE_FILL_MARK_NURSERY_BYTES 0x1000000000000000L -// corresponds to ~430 bytes nursery fill -#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.0000001) -#define BACKOFF_COUNT (20) +// corresponds to ~4 KB nursery fill +#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000001) +#define BACKOFF_COUNT (10) #define BACKOFF_MULTIPLIER (BACKOFF_COUNT / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) static inline void set_backoff(stm_thread_local_t *tl, double rel_trx_len) { @@ -37,18 +37,18 @@ static inline double get_new_transaction_length(stm_thread_local_t *tl, bool aborts) { const int multiplier = 2; double previous = tl->relative_transaction_length; - double new = previous; + double new; if (aborts) { - if (previous > STM_MIN_RELATIVE_TRANSACTION_LENGTH) { - new = previous / multiplier; - } else { + new = previous / multiplier; + if (new < 
STM_MIN_RELATIVE_TRANSACTION_LENGTH) { new = STM_MIN_RELATIVE_TRANSACTION_LENGTH; } set_backoff(tl, new); } else if (tl->transaction_length_backoff == 0) { // backoff counter is zero, exponential increase up to 1 - if (previous < 1) { - new = previous * multiplier; + new = previous * multiplier; + if (new > 1) { + new = 1; } if (tl->linear_transaction_length_increment != 0) { // thread had to abort before: slow start @@ -56,8 +56,9 @@ } } else { // not abort and backoff != 0 // in backoff, linear increase up to 1 - if (previous < 1) { - new = previous + tl->linear_transaction_length_increment; + new = previous + tl->linear_transaction_length_increment; + if (new > 1) { + new = 1; } tl->transaction_length_backoff -= 1; } From pypy.commits at gmail.com Mon Jul 31 19:23:01 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:01 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Backed out changeset c6265dd2c77c Message-ID: <597fbbd5.cd87df0a.d4474.9cfd@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2133:08a8d7fd1866 Date: 2017-07-21 14:19 +0200 http://bitbucket.org/pypy/stmgc/changeset/08a8d7fd1866/ Log: Backed out changeset c6265dd2c77c diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -22,18 +22,12 @@ // #define LARGE_FILL_MARK_NURSERY_BYTES 0x1000000000000000L // corresponds to ~4 KB nursery fill -#define STM_DEFAULT_REL_TRANSACTION_LENGTH (0.000001) -// commit after ~4 B or likely after every instruction -#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000000001) - +#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000001) #define BACKOFF_COUNT (10) -#define BACKOFF_MULTIPLIER (BACKOFF_COUNT / -log10(STM_DEFAULT_REL_TRANSACTION_LENGTH)) +#define BACKOFF_MULTIPLIER (BACKOFF_COUNT / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) static inline void set_backoff(stm_thread_local_t *tl, double rel_trx_len) { - /* the shorter the trx, the more backoff: - 
think a*x + b = backoff, x := -log(rel-trx-len), - backoff is + b at default trx length, - linear decrease to b at max trx length */ + // the shorter the trx, the more backoff: 100 at min trx length, proportional decrease to 5 at max trx length (think a/x + b = backoff) tl->transaction_length_backoff = (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + 5); // printf("thread %d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -247,7 +247,7 @@ tl->thread_local_counter = ++thread_local_counters; /* init adaptive transaction length mode */ - tl->relative_transaction_length = STM_DEFAULT_REL_TRANSACTION_LENGTH; + tl->relative_transaction_length = STM_MIN_RELATIVE_TRANSACTION_LENGTH; tl->transaction_length_backoff = 0; tl->linear_transaction_length_increment = 0; From pypy.commits at gmail.com Mon Jul 31 19:23:03 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:03 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Decrease trx len just ten percent at a time below min Message-ID: <597fbbd7.b288df0a.254f8.b396@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2134:70e630a22710 Date: 2017-07-21 14:42 +0200 http://bitbucket.org/pypy/stmgc/changeset/70e630a22710/ Log: Decrease trx len just ten percent at a time below min diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -23,15 +23,19 @@ // corresponds to ~4 KB nursery fill #define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000001) -#define BACKOFF_COUNT (10) +#define BACKOFF_COUNT (5) #define BACKOFF_MULTIPLIER (BACKOFF_COUNT / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) static inline void set_backoff(stm_thread_local_t *tl, double rel_trx_len) { - // the shorter the trx, the more backoff: 100 at min trx length, proportional decrease to 5 at max trx length (think a/x + b = backoff) + /* the 
shorter the trx, the more backoff: + think a*x + b = backoff, x := -log(rel-trx-len), + backoff is + b at default trx length, + linear decrease to b at max trx length */ + const int b = 5; tl->transaction_length_backoff = - (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + 5); + (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + b); // printf("thread %d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); - tl->linear_transaction_length_increment = rel_trx_len / BACKOFF_COUNT; + tl->linear_transaction_length_increment = rel_trx_len / (BACKOFF_COUNT + b); } static inline double get_new_transaction_length(stm_thread_local_t *tl, bool aborts) { @@ -41,7 +45,8 @@ if (aborts) { new = previous / multiplier; if (new < STM_MIN_RELATIVE_TRANSACTION_LENGTH) { - new = STM_MIN_RELATIVE_TRANSACTION_LENGTH; + // reached min trx length, only decrease slowly + new = 0.9 * previous; } set_backoff(tl, new); } else if (tl->transaction_length_backoff == 0) { From pypy.commits at gmail.com Mon Jul 31 19:23:06 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:06 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Backed out changeset dbea548c4c6e Message-ID: <597fbbda.4f821c0a.f850e.cd2c@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2136:5563999ed658 Date: 2017-07-23 13:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/5563999ed658/ Log: Backed out changeset dbea548c4c6e diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -347,9 +347,6 @@ } if (thread_local_for_logging != NULL) { - if (needs_abort) { - stm_transaction_length_handle_validation(thread_local_for_logging, true); - } stop_timer_and_publish_for_thread( thread_local_for_logging, STM_DURATION_VALIDATION); } @@ -1585,6 +1582,8 @@ tl->self_or_0_if_atomic = (intptr_t)tl; /* clear the 'atomic' flag */ STM_PSEGMENT->atomic_nesting_levels = 0; + stm_transaction_length_handle_validation(tl, true); + if 
(tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); if (tl->mem_reset_on_abort) { From pypy.commits at gmail.com Mon Jul 31 19:23:08 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:08 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Grow backoff cumulatively on repeated aborts Message-ID: <597fbbdc.11371c0a.f5aa2.cb82@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2137:feed32340eb2 Date: 2017-07-23 17:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/feed32340eb2/ Log: Grow backoff cumulatively on repeated aborts diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -32,7 +32,7 @@ backoff is + b at default trx length, linear decrease to b at max trx length */ const int b = 5; - tl->transaction_length_backoff = + tl->transaction_length_backoff += (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + b); // printf("thread %d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); tl->linear_transaction_length_increment = rel_trx_len / (BACKOFF_COUNT + b); From pypy.commits at gmail.com Mon Jul 31 19:23:10 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:10 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Backed out changeset feed32340eb2 Message-ID: <597fbbde.4a90df0a.e6128.e08f@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2138:48e819b53680 Date: 2017-07-24 10:42 +0200 http://bitbucket.org/pypy/stmgc/changeset/48e819b53680/ Log: Backed out changeset feed32340eb2 diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -32,7 +32,7 @@ backoff is + b at default trx length, linear decrease to b at max trx length */ const int b = 5; - tl->transaction_length_backoff += + tl->transaction_length_backoff = (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + b); // printf("thread 
%d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); tl->linear_transaction_length_increment = rel_trx_len / (BACKOFF_COUNT + b); From pypy.commits at gmail.com Mon Jul 31 19:23:12 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:12 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Set backoff to best working value so far and re-enable slow start Message-ID: <597fbbe0.308fdf0a.454f4.1146@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2139:6a3c6e601284 Date: 2017-07-24 11:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/6a3c6e601284/ Log: Set backoff to best working value so far and re-enable slow start diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -23,7 +23,7 @@ // corresponds to ~4 KB nursery fill #define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000001) -#define BACKOFF_COUNT (5) +#define BACKOFF_COUNT (20) #define BACKOFF_MULTIPLIER (BACKOFF_COUNT / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) static inline void set_backoff(stm_thread_local_t *tl, double rel_trx_len) { @@ -32,10 +32,10 @@ backoff is + b at default trx length, linear decrease to b at max trx length */ const int b = 5; - tl->transaction_length_backoff = - (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + b); + int new_backoff = (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + b); + tl->transaction_length_backoff = new_backoff; // printf("thread %d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); - tl->linear_transaction_length_increment = rel_trx_len / (BACKOFF_COUNT + b); + tl->linear_transaction_length_increment = rel_trx_len / new_backoff; } static inline double get_new_transaction_length(stm_thread_local_t *tl, bool aborts) { @@ -55,10 +55,10 @@ if (new > 1) { new = 1; } - // if (tl->linear_transaction_length_increment != 0) { - // // thread had to abort before: slow start - // set_backoff(tl, new); - // } + if 
(tl->linear_transaction_length_increment != 0) { + // thread had to abort before: slow start + set_backoff(tl, new); + } } else { // not abort and backoff != 0 // in backoff, linear increase up to 1 new = previous + tl->linear_transaction_length_increment; From pypy.commits at gmail.com Mon Jul 31 19:23:14 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:14 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Initialize trx len roughly to old default of 1MB and hard cap on lower limit of 4KB Message-ID: <597fbbe2.1babdf0a.4684e.79b5@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2140:3bc9bfa7d481 Date: 2017-07-24 16:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/3bc9bfa7d481/ Log: Initialize trx len roughly to old default of 1MB and hard cap on lower limit of 4KB diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -17,12 +17,14 @@ #define DEFAULT_FILL_MARK_NURSERY_BYTES (NURSERY_SIZE / 4) -// #define LARGE_FILL_MARK_NURSERY_BYTES DEFAULT_FILL_MARK_NURSERY_BYTES +// corresponds to ~4 GB #define LARGE_FILL_MARK_NURSERY_BYTES 0x100000000L -// #define LARGE_FILL_MARK_NURSERY_BYTES 0x1000000000000000L +// corresponds to ~4 MB nursery fill +#define STM_DEFAULT_RELATIVE_TRANSACTION_LENGTH (0.001) // corresponds to ~4 KB nursery fill #define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000001) + #define BACKOFF_COUNT (20) #define BACKOFF_MULTIPLIER (BACKOFF_COUNT / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) @@ -45,8 +47,7 @@ if (aborts) { new = previous / multiplier; if (new < STM_MIN_RELATIVE_TRANSACTION_LENGTH) { - // reached min trx length, only decrease slowly - new = 0.9 * previous; + new = STM_MIN_RELATIVE_TRANSACTION_LENGTH; } set_backoff(tl, new); } else if (tl->transaction_length_backoff == 0) { diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -247,7 +247,7 @@ tl->thread_local_counter = 
++thread_local_counters; /* init adaptive transaction length mode */ - tl->relative_transaction_length = STM_MIN_RELATIVE_TRANSACTION_LENGTH; + tl->relative_transaction_length = STM_DEFAULT_RELATIVE_TRANSACTION_LENGTH; tl->transaction_length_backoff = 0; tl->linear_transaction_length_increment = 0; From pypy.commits at gmail.com Mon Jul 31 19:23:16 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:16 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Merge warm up complete event Message-ID: <597fbbe4.1babdf0a.4684e.79ba@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2141:2828bbba12a4 Date: 2017-07-29 11:25 +0200 http://bitbucket.org/pypy/stmgc/changeset/2828bbba12a4/ Log: Merge warm up complete event diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -583,6 +583,8 @@ STM_GC_MAJOR_DONE, /* execution duration profiling events */ + STM_WARMUP_COMPLETE, + STM_DURATION_START_TRX, STM_DURATION_WRITE_GC_ONLY, STM_DURATION_WRITE_SLOWPATH, @@ -613,6 +615,7 @@ "gc major start", \ "gc major done", \ /* names of duration events */ \ + "marks completion of benchmark warm up phase" \ "duration of transaction start", \ "duration of gc due to write", \ "duration of write slowpath", \ From pypy.commits at gmail.com Mon Jul 31 19:23:00 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:00 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Distinguish min and default trx length to allow shrinking to single instruction level Message-ID: <597fbbd4.c2b81c0a.7fa21.591a@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2132:c6265dd2c77c Date: 2017-07-21 11:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/c6265dd2c77c/ Log: Distinguish min and default trx length to allow shrinking to single instruction level diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -22,12 +22,18 @@ 
// #define LARGE_FILL_MARK_NURSERY_BYTES 0x1000000000000000L // corresponds to ~4 KB nursery fill -#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000001) +#define STM_DEFAULT_REL_TRANSACTION_LENGTH (0.000001) +// commit after ~4 B or likely after every instruction +#define STM_MIN_RELATIVE_TRANSACTION_LENGTH (0.000000001) + #define BACKOFF_COUNT (10) -#define BACKOFF_MULTIPLIER (BACKOFF_COUNT / -log10(STM_MIN_RELATIVE_TRANSACTION_LENGTH)) +#define BACKOFF_MULTIPLIER (BACKOFF_COUNT / -log10(STM_DEFAULT_REL_TRANSACTION_LENGTH)) static inline void set_backoff(stm_thread_local_t *tl, double rel_trx_len) { - // the shorter the trx, the more backoff: 100 at min trx length, proportional decrease to 5 at max trx length (think a/x + b = backoff) + /* the shorter the trx, the more backoff: + think a*x + b = backoff, x := -log(rel-trx-len), + backoff is + b at default trx length, + linear decrease to b at max trx length */ tl->transaction_length_backoff = (int)((BACKOFF_MULTIPLIER * -log10(rel_trx_len)) + 5); // printf("thread %d, backoff %d\n", tl->thread_local_counter, tl->transaction_length_backoff); diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -247,7 +247,7 @@ tl->thread_local_counter = ++thread_local_counters; /* init adaptive transaction length mode */ - tl->relative_transaction_length = STM_MIN_RELATIVE_TRANSACTION_LENGTH; + tl->relative_transaction_length = STM_DEFAULT_REL_TRANSACTION_LENGTH; tl->transaction_length_backoff = 0; tl->linear_transaction_length_increment = 0; From pypy.commits at gmail.com Mon Jul 31 19:23:05 2017 From: pypy.commits at gmail.com (tobweber) Date: Mon, 31 Jul 2017 16:23:05 -0700 (PDT) Subject: [pypy-commit] stmgc c8-tcp-style-trx-length: Disable slow start Message-ID: <597fbbd9.97a9df0a.40b5c.7d18@mx.google.com> Author: Tobias Weber Branch: c8-tcp-style-trx-length Changeset: r2135:d9897d451fff Date: 2017-07-23 13:36 +0200 http://bitbucket.org/pypy/stmgc/changeset/d9897d451fff/ Log: Disable slow 
start diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -55,10 +55,10 @@ if (new > 1) { new = 1; } - if (tl->linear_transaction_length_increment != 0) { - // thread had to abort before: slow start - set_backoff(tl, new); - } + // if (tl->linear_transaction_length_increment != 0) { + // // thread had to abort before: slow start + // set_backoff(tl, new); + // } } else { // not abort and backoff != 0 // in backoff, linear increase up to 1 new = previous + tl->linear_transaction_length_increment; From pypy.commits at gmail.com Mon Jul 31 20:07:47 2017 From: pypy.commits at gmail.com (wlav) Date: Mon, 31 Jul 2017 17:07:47 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: rename destruct -> __destruct__ to not interfere with user defined methods Message-ID: <597fc653.d47d1c0a.29b66.02d9@mx.google.com> Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r92007:a0f6d0e8e563 Date: 2017-07-31 16:43 -0700 http://bitbucket.org/pypy/pypy/changeset/a0f6d0e8e563/ Log: rename destruct -> __destruct__ to not interfere with user defined methods diff --git a/pypy/module/_cppyy/interp_cppyy.py b/pypy/module/_cppyy/interp_cppyy.py --- a/pypy/module/_cppyy/interp_cppyy.py +++ b/pypy/module/_cppyy/interp_cppyy.py @@ -1195,7 +1195,7 @@ __len__ = interp2app(W_CPPInstance.instance__len__), __cmp__ = interp2app(W_CPPInstance.instance__cmp__), __repr__ = interp2app(W_CPPInstance.instance__repr__), - destruct = interp2app(W_CPPInstance.destruct), + __destruct__ = interp2app(W_CPPInstance.destruct), ) W_CPPInstance.typedef.acceptable_as_base_class = True diff --git a/pypy/module/_cppyy/test/test_advancedcpp.py b/pypy/module/_cppyy/test/test_advancedcpp.py --- a/pypy/module/_cppyy/test/test_advancedcpp.py +++ b/pypy/module/_cppyy/test/test_advancedcpp.py @@ -32,25 +32,25 @@ assert d.m_a == t(11) assert d.m_b == t(22) assert d.m_c == t(33) - d.destruct() + d.__destruct__() d = defaulter(0) assert d.m_a == t(0) assert d.m_b == t(22) 
assert d.m_c == t(33) - d.destruct() + d.__destruct__() d = defaulter(1, 2) assert d.m_a == t(1) assert d.m_b == t(2) assert d.m_c == t(33) - d.destruct() + d.__destruct__() d = defaulter(3, 4, 5) assert d.m_a == t(3) assert d.m_b == t(4) assert d.m_c == t(5) - d.destruct() + d.__destruct__() test_defaulter('short', int) test_defaulter('ushort', int) test_defaulter('int', int) @@ -87,7 +87,7 @@ assert b.m_db == 11.11 assert b.get_base_value() == 11.11 - b.destruct() + b.__destruct__() d = derived_class() assert isinstance(d, derived_class) @@ -114,7 +114,7 @@ assert d.m_db == 11.11 assert d.get_base_value() == 11.11 - d.destruct() + d.__destruct__() def test03_namespaces(self): """Test access to namespaces and inner classes""" @@ -219,7 +219,7 @@ t1 = gbl.T1(int)() assert t1.m_t1 == 1 assert t1.get_value() == 1 - t1.destruct() + t1.__destruct__() #----- t1 = gbl.T1(int)(11) @@ -228,14 +228,14 @@ t1.m_t1 = 111 assert t1.get_value() == 111 assert t1.m_t1 == 111 - t1.destruct() + t1.__destruct__() #----- t2 = gbl.T2(gbl.T1(int))(gbl.T1(int)(32)) t2.m_t2.m_t1 = 32 assert t2.m_t2.get_value() == 32 assert t2.m_t2.m_t1 == 32 - t2.destruct() + t2.__destruct__() def test05_abstract_classes(self): @@ -296,7 +296,7 @@ b.m_db = 22.22 assert b.m_db == 22.22 - b.destruct() + b.__destruct__() #----- c1 = c_class_1() @@ -317,7 +317,7 @@ assert c1.m_c == 33 assert c1.get_value() == 33 - c1.destruct() + c1.__destruct__() #----- d = d_class() @@ -345,7 +345,7 @@ assert d.m_d == 44 assert d.get_value() == 44 - d.destruct() + d.__destruct__() def test07_pass_by_reference(self): """Test reference passing when using virtual inheritance""" @@ -361,7 +361,7 @@ b.m_a, b.m_b = 11, 22 assert gbl.get_a(b) == 11 assert gbl.get_b(b) == 22 - b.destruct() + b.__destruct__() #----- c = c_class() @@ -369,7 +369,7 @@ assert gbl.get_a(c) == 11 assert gbl.get_b(c) == 22 assert gbl.get_c(c) == 33 - c.destruct() + c.__destruct__() #----- d = d_class() @@ -378,7 +378,7 @@ assert gbl.get_b(d) == 22 assert 
gbl.get_c(d) == 33 assert gbl.get_d(d) == 44 - d.destruct() + d.__destruct__() def test08_void_pointer_passing(self): """Test passing of variants of void pointer arguments""" @@ -462,8 +462,8 @@ assert not dd1a is dd2 assert not dd1b is dd2 - d2.destruct() - d1.destruct() + d2.__destruct__() + d1.__destruct__() def test11_multi_methods(self): """Test calling of methods from multiple inheritance""" @@ -533,12 +533,12 @@ c1 = _cppyy.gbl.create_c1() assert type(c1) == _cppyy.gbl.c_class_1 assert c1.m_c == 3 - c1.destruct() + c1.__destruct__() c2 = _cppyy.gbl.create_c2() assert type(c2) == _cppyy.gbl.c_class_2 assert c2.m_c == 3 - c2.destruct() + c2.__destruct__() def test14_new_overloader(self): """Verify that class-level overloaded new/delete are called""" @@ -548,7 +548,7 @@ assert _cppyy.gbl.new_overloader.s_instances == 0 nl = _cppyy.gbl.new_overloader() assert _cppyy.gbl.new_overloader.s_instances == 1 - nl.destruct() + nl.__destruct__() import gc gc.collect() diff --git a/pypy/module/_cppyy/test/test_cppyy.py b/pypy/module/_cppyy/test/test_cppyy.py --- a/pypy/module/_cppyy/test/test_cppyy.py +++ b/pypy/module/_cppyy/test/test_cppyy.py @@ -98,19 +98,19 @@ assert res == 11 res = t.get_overload("addDataToInt").call(e1, -4) assert res == 3 - e1.destruct() + e1.__destruct__() assert t.get_overload("getCount").call(None) == 0 raises(ReferenceError, 't.get_overload("addDataToInt").call(e1, 4)') e1 = t.get_overload(t.type_name).call(None, 7) e2 = t.get_overload(t.type_name).call(None, 8) assert t.get_overload("getCount").call(None) == 2 - e1.destruct() + e1.__destruct__() assert t.get_overload("getCount").call(None) == 1 - e2.destruct() + e2.__destruct__() assert t.get_overload("getCount").call(None) == 0 - e2.destruct() + e2.__destruct__() assert t.get_overload("getCount").call(None) == 0 raises(TypeError, t.get_overload("addDataToInt").call, 41, 4) @@ -141,7 +141,7 @@ e1 = None gc.collect() assert t.get_overload("getCount").call(None) == 1 - e2.destruct() + 
e2.__destruct__() assert t.get_overload("getCount").call(None) == 0 e2 = None gc.collect() @@ -178,12 +178,12 @@ e = t.get_overload(t.type_name).call(None, 13) res = t.get_overload("addDataToDouble").call(e, 16) assert round(res-29, 8) == 0. - e.destruct() + e.__destruct__() e = t.get_overload(t.type_name).call(None, -13) res = t.get_overload("addDataToDouble").call(e, 16) assert round(res-3, 8) == 0. - e.destruct() + e.__destruct__() assert t.get_overload("getCount").call(None) == 0 def test07_method_constcharp(self): @@ -200,7 +200,7 @@ assert res == "54" res = t.get_overload("addToStringValue").call(e, "-12") # TODO: this leaks assert res == "30" - e.destruct() + e.__destruct__() assert t.get_overload("getCount").call(None) == 0 def test08_pass_object_by_pointer(self): @@ -219,8 +219,8 @@ t1.get_overload("setPayload").call(e, pl); assert round(t2.get_overload("getData").call(pl)-50., 8) == 0 - e.destruct() - pl.destruct() + e.__destruct__() + pl.__destruct__() assert t1.get_overload("getCount").call(None) == 0 def test09_return_object_by_pointer(self): @@ -239,6 +239,6 @@ pl2 = t1.get_overload("cyclePayload").call(e, pl1); assert round(t2.get_overload("getData").call(pl2)-50., 8) == 0 - e.destruct() - pl1.destruct() + e.__destruct__() + pl1.__destruct__() assert t1.get_overload("getCount").call(None) == 0 diff --git a/pypy/module/_cppyy/test/test_datatypes.py b/pypy/module/_cppyy/test/test_datatypes.py --- a/pypy/module/_cppyy/test/test_datatypes.py +++ b/pypy/module/_cppyy/test/test_datatypes.py @@ -110,7 +110,7 @@ assert not hasattr(CppyyTestData, 'm_bool') assert not hasattr(CppyyTestData, 'm_int') - c.destruct() + c.__destruct__() def test03_instance_data_write_access(self): """Test write access to instance public data and verify values""" @@ -197,7 +197,7 @@ for i in range(self.N): assert eval('c.m_%s_array2[i]' % names[j]) == b[i] - c.destruct() + c.__destruct__() def test04_array_passing(self): """Test passing of array arguments""" @@ -235,7 +235,7 @@ 
assert not c.pass_array(_cppyy.gbl.nullptr) raises(Exception, c.pass_array(_cppyy.gbl.nullptr).__getitem__, 0) # id. id. - c.destruct() + c.__destruct__() def test05_class_read_access(self): """Test read access to class public data and verify values""" @@ -276,7 +276,7 @@ assert round(CppyyTestData.s_double + 707., 8) == 0 assert round(c.s_double + 707., 8) == 0 - c.destruct() + c.__destruct__() def test06_class_data_write_access(self): """Test write access to class public data and verify values""" @@ -340,7 +340,7 @@ CppyyTestData.s_double = math.pi assert c.s_double == math.pi - c.destruct() + c.__destruct__() def test07_range_access(self): """Test the ranges of integer types""" @@ -356,7 +356,7 @@ raises(ValueError, setattr, c, 'm_uint', -1) raises(ValueError, setattr, c, 'm_ulong', -1) - c.destruct() + c.__destruct__() def test08_type_conversions(self): """Test conversions between builtin types""" @@ -374,7 +374,7 @@ raises(TypeError, c.m_int, -1.) raises(TypeError, c.m_int, 1.) - c.destruct() + c.__destruct__() def test09_global_builtin_type(self): """Test access to a global builtin type""" @@ -618,7 +618,7 @@ raises(AttributeError, getattr, c, 'm_owns_arrays') - c.destruct() + c.__destruct__() def test18_object_and_pointer_comparisons(self): """Verify object and pointer comparisons""" diff --git a/pypy/module/_cppyy/test/test_pythonify.py b/pypy/module/_cppyy/test/test_pythonify.py --- a/pypy/module/_cppyy/test/test_pythonify.py +++ b/pypy/module/_cppyy/test/test_pythonify.py @@ -82,7 +82,7 @@ assert res == 11 res = instance.addDataToInt(-4) assert res == 3 - instance.destruct() + instance.__destruct__() assert example01_class.getCount() == 0 raises(ReferenceError, 'instance.addDataToInt(4)') return @@ -90,16 +90,16 @@ instance = example01_class(7) instance2 = example01_class(8) assert example01_class.getCount() == 2 - instance.destruct() + instance.__destruct__() assert example01_class.getCount() == 1 - instance2.destruct() + instance2.__destruct__() assert 
example01_class.getCount() == 0 t = self.example01 instance = example01_class(13) res = instance.addDataToDouble(16) assert round(res-29, 8) == 0. - instance.destruct() + instance.__destruct__() instance = example01_class(-13) res = instance.addDataToDouble(16) assert round(res-3, 8) == 0. @@ -119,7 +119,7 @@ res = instance.staticAddOneToInt(1L) assert res == 2 - instance.destruct() + instance.__destruct__() assert example01_class.getCount() == 0 def test05_passing_object_by_pointer(self): @@ -141,8 +141,8 @@ e.setPayload(pl) assert round(pl.getData()-14., 8) == 0 - pl.destruct() - e.destruct() + pl.__destruct__() + e.__destruct__() assert example01_class.getCount() == 0 def test06_returning_object_by_pointer(self): @@ -161,8 +161,8 @@ pl2 = e.cyclePayload(pl) assert round(pl2.getData()-14., 8) == 0 - pl.destruct() - e.destruct() + pl.__destruct__() + e.__destruct__() assert example01_class.getCount() == 0 def test07_returning_object_by_value(self): @@ -175,16 +175,16 @@ pl2 = example01_class.staticCopyCyclePayload(pl, 38.) assert pl2.getData() == 38. 
- pl2.destruct() + pl2.__destruct__() e = example01_class(14) pl2 = e.copyCyclePayload(pl) assert round(pl2.getData()-14., 8) == 0 - pl2.destruct() + pl2.__destruct__() - pl.destruct() - e.destruct() + pl.__destruct__() + e.__destruct__() assert example01_class.getCount() == 0 def test08_global_functions(self): @@ -341,7 +341,7 @@ o = example01() assert type(o) == example01 assert example01.getCount() == 1 - o.destruct() + o.__destruct__() assert example01.getCount() == 0 class MyClass1(example01): @@ -353,7 +353,7 @@ assert isinstance(o, example01) assert example01.getCount() == 1 assert o.myfunc() == 1 - o.destruct() + o.__destruct__() assert example01.getCount() == 0 class MyClass2(example01): @@ -365,7 +365,7 @@ assert type(o) == MyClass2 assert example01.getCount() == 1 assert o.what == 'hi' - o.destruct() + o.__destruct__() assert example01.getCount() == 0 diff --git a/pypy/module/_cppyy/test/test_stltypes.py b/pypy/module/_cppyy/test/test_stltypes.py --- a/pypy/module/_cppyy/test/test_stltypes.py +++ b/pypy/module/_cppyy/test/test_stltypes.py @@ -102,7 +102,7 @@ assert v[i].m_i == i assert len(v) == self.N - v.destruct() + v.__destruct__() def test03_empty_vector_type(self): """Test behavior of empty std::vector""" @@ -112,7 +112,7 @@ v = _cppyy.gbl.std.vector(int)() for arg in v: pass - v.destruct() + v.__destruct__() def test04_vector_iteration(self): """Test iteration over an std::vector""" @@ -137,7 +137,7 @@ assert list(v) == [i for i in range(self.N)] - v.destruct() + v.__destruct__() def test05_push_back_iterables_with_iadd(self): """Test usage of += of iterable on push_back-able container""" From pypy.commits at gmail.com Mon Jul 31 20:07:49 2017 From: pypy.commits at gmail.com (wlav) Date: Mon, 31 Jul 2017 17:07:49 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: remove backend code (lives in http://bitbucket/wlav/cppyy-backend and is shared with CPython) and the builtin capi option Message-ID: <597fc655.0591df0a.7b67e.2baf@mx.google.com> 
Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r92008:80bd00a75270 Date: 2017-07-31 16:48 -0700 http://bitbucket.org/pypy/pypy/changeset/80bd00a75270/ Log: remove backend code (lives in http://bitbucket/wlav/cppyy-backend and is shared with CPython) and the builtin capi option (never used in production) diff too long, truncating to 2000 out of 3411 lines diff --git a/pypy/module/_cppyy/backend/create_cppyy_package.py b/pypy/module/_cppyy/backend/create_cppyy_package.py deleted file mode 100755 --- a/pypy/module/_cppyy/backend/create_cppyy_package.py +++ /dev/null @@ -1,649 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function - -import os, sys -import argparse, re, shutil, tarfile, urllib2 - - -DEBUG_TESTBUILD = False - -TARBALL_CACHE_DIR = 'releases' - -ROOT_KEEP = ['build', 'cmake', 'config', 'core', 'etc', 'interpreter', - 'io', 'LICENSE', 'net', 'Makefile', 'CMakeLists.txt', 'math', - 'main'] # main only needed in more recent root b/c of rootcling -ROOT_CORE_KEEP = ['CMakeLists.txt', 'base', 'clib', 'clingutils', 'cont', - 'dictgen', 'foundation', 'lzma', 'macosx', 'meta', - 'metacling', 'metautils', 'rootcling_stage1', 'textinput', - 'thread', 'unix', 'utils', 'winnt', 'zip'] -ROOT_IO_KEEP = ['CMakeLists.txt', 'io', 'rootpcm'] -ROOT_NET_KEEP = ['CMakeLists.txt', 'net'] -ROOT_MATH_KEEP = ['CMakeLists.txt', 'mathcore'] -ROOT_ETC_KEEP = ['Makefile.arch', 'class.rules', 'cmake', 'dictpch', - 'gdb-backtrace.sh', 'gitinfo.txt', 'helgrind-root.supp', - 'hostcert.conf', 'system.plugins-ios', - 'valgrind-root-python.supp', 'valgrind-root.supp', 'vmc'] - -ROOT_EXPLICIT_REMOVE = ['core/base/v7', 'math/mathcore/v7', 'io/io/v7'] - - -ERR_RELEASE_NOT_FOUND = 2 - - -# -## CLI arguments -# -class ReleaseValidation(argparse.Action): - def __call__(self, parser, namespace, value, option_string=None): - if not re.match(r'6\.\d\d\.\d\d', value): - raise argparse.ArgumentTypeError( - "release number should of the form '6.dd.dd'") - setattr(namespace, 
self.dest, value) - return value - -parser = argparse.ArgumentParser( - description='Build PyPi package for cppyy containing the minimum of ROOT') -parser.add_argument('-r', '--release', type=str, nargs='?', - action=ReleaseValidation, help='ROOT release to use') - -args = parser.parse_args() - - -# -## ROOT source pull and cleansing -# -def clean_directory(directory, keeplist, trim_cmake=True): - removed_entries = [] - for entry in os.listdir(directory): - if entry[0] == '.' or entry in keeplist: - continue - removed_entries.append(entry) - entry = os.path.join(directory, entry) - print('now removing', entry) - if os.path.isdir(entry): - shutil.rmtree(entry) - else: - os.remove(entry) - - if not trim_cmake: - return - - # now take the removed entries out of the CMakeLists.txt - if removed_entries: - inp = os.path.join(directory, 'CMakeLists.txt') - print('trimming', inp) - outp = inp+'.new' - new_cml = open(outp, 'w') - for line in open(inp).readlines(): - if ('add_subdirectory' in line) or\ - ('COMMAND' in line and 'copy' in line) or\ - ('ROOT_ADD_TEST_SUBDIRECTORY' in line) or\ - ('install(DIRECTORY' in line): - for sub in removed_entries: - if sub in line: - line = '#'+line - break - new_cml.write(line) - new_cml.close() - os.rename(outp, inp) - else: - print('reusing existing %s/CMakeLists.txt' % (directory,)) - - -class ReleaseValidation(argparse.Action): - def __call__(self, parser, namespace, value, option_string=None): - if not re.match(r'6\.\d\d\.\d\d', value): - raise argparse.ArgumentTypeError( - "release number should of the form '6.dd.dd'") - setattr(namespace, self.dest, value) - return value - -parser = argparse.ArgumentParser( - description='Build PyPi package for cppyy containing the minimum of ROOT') -parser.add_argument('-r', '--release', type=str, nargs='?', - action=ReleaseValidation, help='ROOT release to use') - -args = parser.parse_args() - -if not os.path.exists(TARBALL_CACHE_DIR): - os.mkdir(TARBALL_CACHE_DIR) - -if args.release: - # use 
provided release - fn = 'root_v%s.source.tar.gz' % args.release - addr = 'https://root.cern.ch/download/'+fn - if not os.path.exists(os.path.join(TARBALL_CACHE_DIR, fn)): - try: - print('retrieving', fn) - resp = urllib2.urlopen(addr, fn) - out = open(os.path.join(TARBALL_CACHE_DIR, fn), 'wb') - out.write(resp.read()) - out.close() - except urllib2.HTTPError: - print('release %s not found' % args.release) - sys.exit(ERR_RELEASE_NOT_FOUND) - else: - print('reusing', fn, 'from local directory') -else: - print('provide release ... getting latest release is not yet implemented ...') - sys.exit(1) - # get latest and set fn, args.release, etc. - -# construct version for package -args.version = '' -testnext = False -for c in args.release: - if testnext: - testnext = False - if c == '0': - continue - if c == '.': - testnext = True - args.version += c -args.version += '.0' - -fn = os.path.join(TARBALL_CACHE_DIR, fn) -pkgdir = os.path.join('root-'+args.release) -if not os.path.exists(pkgdir): - print('now extracting', args.release) - tf = tarfile.TarFile.gzopen(fn) - tf.extractall() - tf.close() -else: - print('reusing existing directory', pkgdir) - -# remove everything except for the listed set of libraries -os.chdir(pkgdir) - -clean_directory(os.path.curdir, ROOT_KEEP) -clean_directory('core', ROOT_CORE_KEEP) -clean_directory('etc', ROOT_ETC_KEEP, trim_cmake=False) -clean_directory('io', ROOT_IO_KEEP) -clean_directory('math', ROOT_MATH_KEEP) -clean_directory('net', ROOT_NET_KEEP) - - -# trim main (only need rootcling) -print('trimming main') -for entry in os.listdir('main/src'): - if entry != 'rootcling.cxx': - os.remove('main/src/'+entry) -inp = 'main/CMakeLists.txt' -outp = inp+'.new' -new_cml = open(outp, 'w') -for line in open(inp).readlines(): - if ('ROOT_EXECUTABLE' in line or\ - 'SET_TARGET_PROPERTIES' in line) and\ - not 'rootcling' in line: - line = '#'+line - new_cml.write(line) -new_cml.close() -os.rename(outp, inp) - - -# remove afterimage and ftgl explicitly 
-print('trimming externals') -for cmf in ['AfterImage', 'FTGL']: - os.remove('cmake/modules/Find%s.cmake' % (cmf,)) -inp = 'cmake/modules/SearchInstalledSoftware.cmake' -outp = inp+'.new' -now_stripping = False -new_cml = open(outp, 'w') -for line in open(inp).readlines(): - if '#---Check for ftgl if needed' == line[0:28] or\ - '#---Check for AfterImage' == line[0:24]: - now_stripping = True - elif '#---Check' == line[0:9]: - now_stripping = False - if now_stripping: - line = '#'+line - new_cml.write(line) -new_cml.close() -os.rename(outp, inp) - -inp = 'cmake/modules/RootBuildOptions.cmake' -outp = inp+'.new' -new_cml = open(outp, 'w') -for line in open(inp).readlines(): - if 'ROOT_BUILD_OPTION(builtin_ftgl' in line or\ - 'ROOT_BUILD_OPTION(builtin_afterimage' in line: - line = '#'+line - new_cml.write(line) -new_cml.close() -os.rename(outp, inp) - - -# remove testing and examples -print('trimming testing') -inp = 'CMakeLists.txt' -outp = inp+'.new' -now_stripping = False -new_cml = open(outp, 'w') -for line in open(inp).readlines(): - if '#---Configure Testing using CTest' == line[0:33] or\ - '#---hsimple.root' == line[0:16]: - now_stripping = True - elif '#---Packaging' == line[0:13] or\ - '#---version' == line[0:11]: - now_stripping = False - if now_stripping: - line = '#'+line - new_cml.write(line) -new_cml.close() -os.rename(outp, inp) - -print('trimming RootCPack') -inp = 'cmake/modules/RootCPack.cmake' -outp = inp+'.new' -new_cml = open(outp, 'w') -for line in open(inp): - if 'README.txt' in line: - line = '#'+line - new_cml.write(line) -new_cml.close() -os.rename(outp, inp) - -# some more explicit removes: -for dir_to_remove in ROOT_EXPLICIT_REMOVE: - try: - shutil.rmtree(dir_to_remove) - except OSError: - pass - -# special fixes -inp = 'core/base/src/TVirtualPad.cxx' -outp = inp+'.new' -new_cml = open(outp, 'w') -for line in open(inp): - if '#include "X3DBuffer.h"' == line[0:22]: - line = """//#include "X3DBuffer.h" -typedef struct _x3d_sizeof_ { - int 
numPoints; - int numSegs; - int numPolys; -} Size3D; -""" - new_cml.write(line) -new_cml.close() -os.rename(outp, inp) - -inp = 'math/mathcore/src/Fitter.cxx' -if os.path.exists(inp): - outp = inp+'.new' - new_cml = open(outp, 'w') - for line in open(inp): - if '#include "TF1.h"' in line: - continue - new_cml.write(line) - new_cml.close() - os.rename(outp, inp) - -# done -os.chdir(os.path.pardir) - -# debugging: run a test build -if DEBUG_TESTBUILD: - print('running a debug test build') - tb = "test_builddir" - if os.path.exists(tb): - shutil.rmtree(tb) - os.mkdir(tb) - os.chdir(tb) - os.system('cmake ../%s -DCMAKE_INSTALL_PREFIX=../install -Dminimal=ON -Dasimage=OFF' % pkgdir) - os.system('make -j 32') - - -# -## package creation -# -countdown = 0 -pidir = 'Package-'+args.release -print('creating package', pidir) -if not os.path.exists(pidir): - os.mkdir(pidir) -os.chdir(pidir); countdown += 1 - -print('creating LICENSE.txt') -with open('LICENSE.txt', 'w') as outp: - outp.write("""There are three main parts: - - LLVM: distributed under University of Illinois/NCSA Open Source License - https://opensource.org/licenses/UoI-NCSA.php - ROOT: distributed under LGPL 2.1 - https://root.cern.ch/license - Cppyy: distributed under LBNL BSD - https://fedoraproject.org/wiki/Licensing/LBNLBSD -""") - -print('creating MANIFEST.in') -with open('MANIFEST.in', 'w') as outp: - outp.write("""# Include the license file -include LICENSE.txt - -# Include the data files -recursive-include src * -""") - -print('creating README.rst') -with open('README.rst', 'w') as outp: - outp.write("""PyPy cling-support -================== - ----- - -Find the documentation here: - http://doc.pypy.org/en/latest/cppyy.html -""") - -print('creating setup.cfg') -with open('setup.cfg', 'w') as outp: - outp.write("""[bdist_wheel] -universal=0 -""") - -print('creating setup.py') -with open('setup.py', 'w') as outp: - outp.write("""import os, sys, subprocess -from setuptools import setup, find_packages -from 
distutils import log -from distutils.command.build import build as _build -from setuptools.command.install import install as _install -from distutils.sysconfig import get_python_lib -from distutils.errors import DistutilsSetupError -from codecs import open - -here = os.path.abspath(os.path.dirname(__file__)) -with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: - long_description = f.read() - -builddir = None -def get_builddir(): - global builddir - if builddir is None: - topdir = os.getcwd() - builddir = os.path.join(topdir, 'builddir') - return builddir - -srcdir = None -def get_srcdir(): - global srcdir - if srcdir is None: - topdir = os.getcwd() - srcdir = os.path.join(topdir, 'src', 'backend') - return srcdir - -class my_cmake_build(_build): - def __init__(self, dist, *args, **kwargs): - _build.__init__(self, dist, *args, **kwargs) - # TODO: can't seem to find a better way of getting hold of - # the install_lib parameter during the build phase ... - prefix = '' - try: - prefix = dist.get_command_obj('install').install_lib - except AttributeError: - pass - if not prefix: - prefix = get_python_lib(1, 0) - self.prefix = os.path.join(prefix, 'cppyy_backend') - - def run(self): - # base run - _build.run(self) - - # custom run - log.info('Now building libcppyy_backend.so and dependencies') - builddir = get_builddir() - srcdir = get_srcdir() - if not os.path.exists(builddir): - log.info('Creating build directory %s ...' 
% builddir) - os.makedirs(builddir) - - os.chdir(builddir) - log.info('Running cmake for cppyy_backend') - if subprocess.call([ - 'cmake', srcdir, '-Dminimal=ON -Dasimage=OFF', - '-DCMAKE_INSTALL_PREFIX='+self.prefix]) != 0: - raise DistutilsSetupError('Failed to configure cppyy_backend') - - nprocs = os.getenv("MAKE_NPROCS") - if nprocs: - try: - ival = int(nprocs) - nprocs = '-j'+nprocs - except ValueError: - log.warn("Integer expected for MAKE_NPROCS, but got %s (ignored)", nprocs) - nprocs = '-j1' - else: - nprocs = '-j1' - log.info('Now building cppyy_backend and dependencies ...') - if subprocess.call(['make', nprocs]) != 0: - raise DistutilsSetupError('Failed to build cppyy_backend') - - log.info('build finished') - -class my_libs_install(_install): - def run(self): - # base install - _install.run(self) - - # custom install - log.info('Now installing libcppyy_backend.so and dependencies') - builddir = get_builddir() - if not os.path.exists(builddir): - raise DistutilsSetupError('Failed to find build dir!') - os.chdir(builddir) - - prefix = self.install_lib - log.info('Now installing in %s ...', prefix) - if subprocess.call(['make', 'install']) != 0: - raise DistutilsSetupError('Failed to install cppyy_backend') - - log.info('install finished') - - def get_outputs(self): - outputs = _install.get_outputs(self) - outputs.append(os.path.join(self.install_lib, 'cppyy_backend')) - return outputs - -setup( - name='PyPy-cppyy-backend', -""") - outp.write(" version='%s', # corresponds to ROOT %s, extra number is for packager\n"\ - % (args.version, args.release)) - outp.write(""" description='Cling support for PyPy', - long_description=long_description, - - url='http://pypy.org', - - # Author details - author='PyPy Developers', - author_email='pypy-dev at python.org', - - license='LLVM: UoI-NCSA; ROOT: LGPL 2.1; Cppyy: LBNL BSD', - - classifiers=[ - 'Development Status :: 4 - Beta', - - 'Intended Audience :: Developers', - - 'Topic :: Software Development', - 'Topic 
:: Software Development :: Interpreters', - - #'License :: OSI Approved :: MIT License', - - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: Implementation :: PyPy', - 'Programming Language :: C', - 'Programming Language :: C++', - - 'Natural Language :: English' - ], - - keywords='interpreter development', - - packages=find_packages('src', ['backend']), - include_package_data=True, - - extras_require={ - }, - - cmdclass = { - 'build': my_cmake_build, - 'install': my_libs_install, - }, -) -""") - - -print('creating src ... ROOT part') -if not os.path.exists('src'): - os.mkdir('src') -os.chdir('src'); countdown += 1 -if not os.path.exists('backend'): - src = os.path.join(os.path.pardir, os.path.pardir, pkgdir) - print('now copying', src) - shutil.copytree(src, 'backend') - -print('creating src ... cppyy part') -os.chdir('backend'); countdown += 1 -if not os.path.exists('cppyy'): - os.mkdir('cppyy') - os.chdir('cppyy'); countdown += 1 - - with open('CMakeLists.txt', 'w') as outp: - outp.write("""############################################################################ -# CMakeLists.txt file for building cppyy package -############################################################################ - -ROOT_GLOB_SOURCES(sources ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cxx) -set_source_files_properties(${sources} COMPILE_FLAGS "-fomit-frame-pointer -fvisibility=hidden -DRPY_EXTERN=RPY_EXPORTED -DRPYTHON_LL2CTYPES") - -add_definitions(${CLING_CXXFLAGS}) - -ROOT_LINKER_LIBRARY(cppyy_backend ${sources} - LIBRARIES ${CMAKE_DL_LIBS} - DEPENDENCIES Core Cling RIO Thread) - -add_dependencies(cppyy_backend CLING) -""") - - os.mkdir('src') - os.chdir('src'); countdown += 1 - print('pulling cppyy/clingcwrapper.cxx from pypy') - base = 'https://bitbucket.org/pypy/pypy/raw/default/pypy/module/cppyy/' - for cppyy_file in ['src/callcontext.h', 'include/capi.h', 'src/clingcwrapper.cxx', - 'include/clingcwrapper.h', 
'include/cpp_cppyy.h', 'include/cppyy.h']: - resp = urllib2.urlopen(base+cppyy_file) - with open(os.path.basename(cppyy_file), 'w') as outp: - outp.write(resp.read()) - - # fix include - inp = 'capi.h' - outp = inp+'.new' - new_cml = open(outp, 'w') - for line in open(inp).readlines(): - if 'src/precommondefs.h' in line: - line = '#include "precommondefs.h"\n' - new_cml.write(line) - new_cml.close() - os.rename(outp, inp) - - with open('precommondefs.h', 'w') as outp: - outp.write("""/***** Start of precommondefs.h *****/ - -/* This is extracted from pyconfig.h from CPython. It sets the macros - that affect the features we get from system include files. - It must not #include anything. */ - -#ifndef __PYPY_PRECOMMONDEFS_H -#define __PYPY_PRECOMMONDEFS_H - - -/* Define on Darwin to activate all library features */ -#define _DARWIN_C_SOURCE 1 -/* This must be set to 64 on some systems to enable large file support. */ -#define _FILE_OFFSET_BITS 64 -/* Define on Linux to activate all library features */ -#define _GNU_SOURCE 1 -/* This must be defined on some systems to enable large file support. 
*/ -#define _LARGEFILE_SOURCE 1 -/* Define on NetBSD to activate all library features */ -#define _NETBSD_SOURCE 1 -/* Define to activate features from IEEE Stds 1003.1-2001 */ -#ifndef _POSIX_C_SOURCE -# define _POSIX_C_SOURCE 200112L -#endif -/* Define on FreeBSD to activate all library features */ -#define __BSD_VISIBLE 1 -#define __XSI_VISIBLE 700 -/* Windows: winsock/winsock2 mess */ -#define WIN32_LEAN_AND_MEAN -#ifdef _WIN64 - typedef __int64 Signed; - typedef unsigned __int64 Unsigned; -# define SIGNED_MIN LLONG_MIN -#else - typedef long Signed; - typedef unsigned long Unsigned; -# define SIGNED_MIN LONG_MIN -#endif - -#if !defined(RPY_ASSERT) && !defined(RPY_LL_ASSERT) && !defined(NDEBUG) -# define NDEBUG -#endif - - -/* All functions and global variables declared anywhere should use - one of the following attributes: - - RPY_EXPORTED: the symbol is exported out of libpypy-c.so. - - RPY_EXTERN: the symbol is not exported out of libpypy-c.so, but - otherwise works like 'extern' by being available to - other C sources. - - static: as usual, this means the symbol is local to this C file. - - Don't use _RPY_HIDDEN directly. For tests involving building a custom - .so, translator/tool/cbuild.py overrides RPY_EXTERN so that it becomes - equal to RPY_EXPORTED. - - Any function or global variable declared with no attribute at all is - a bug; please report or fix it. 
-*/ -#ifdef __GNUC__ -# define RPY_EXPORTED extern __attribute__((visibility("default"))) -# define _RPY_HIDDEN __attribute__((visibility("hidden"))) -#else -# define RPY_EXPORTED extern __declspec(dllexport) -# define _RPY_HIDDEN /* nothing */ -#endif -#ifndef RPY_EXTERN -# define RPY_EXTERN extern _RPY_HIDDEN -#endif - - -#endif /* __PYPY_PRECOMMONDEFS_H */ - -/***** End of precommondefs.h *****/ -""") - -# back up to pip package top -for i in range(countdown-1): - os.chdir(os.path.pardir) - -# add cppyy module to cmake -os.chdir('src/backend') -inp = 'CMakeLists.txt' -print('adding cppyy to cmake') -outp = inp+'.new' -new_cml = open(outp, 'w') -for line in open(inp).readlines(): - if 'add_subdirectory' in line and 'net' in line: - line += 'add_subdirectory (cppyy)\n' - new_cml.write(line) -new_cml.close() -os.rename(outp, inp) - -# done! diff --git a/pypy/module/_cppyy/capi/__init__.py b/pypy/module/_cppyy/capi/__init__.py --- a/pypy/module/_cppyy/capi/__init__.py +++ b/pypy/module/_cppyy/capi/__init__.py @@ -1,16 +1,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype -# There are two possible ways of accessing the backend through the reflection -# C-API: built it into pypy-c, or load it dynamically. The latter is preferred -# (and is the default) for use with Reflex. B/c of some builtin pythonizations, -# the former is recommended (for now) with CINT. - -# Note: if builtin_capi is chosen, then inside builtin_capi.py, there is still -# the selection of the desired backend (default is Reflex). 
- -# choose C-API access method: from pypy.module._cppyy.capi.loadable_capi import * -#from pypy.module._cppyy.capi.builtin_capi import * from pypy.module._cppyy.capi.capi_types import C_OBJECT,\ C_NULL_TYPE, C_NULL_OBJECT diff --git a/pypy/module/_cppyy/capi/builtin_capi.py b/pypy/module/_cppyy/capi/builtin_capi.py deleted file mode 100644 --- a/pypy/module/_cppyy/capi/builtin_capi.py +++ /dev/null @@ -1,590 +0,0 @@ -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rarithmetic import intmask -from rpython.rlib import jit - -import cling_capi as backend - -from pypy.module._cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR - -identify = backend.identify -pythonize = backend.pythonize -register_pythonizations = backend.register_pythonizations -std_string_name = backend.std_string_name - -ts_reflect = backend.ts_reflect -ts_call = backend.ts_call -ts_memory = backend.ts_memory -ts_helper = backend.ts_helper - -def verify_backend(space): - return True # by definition - -c_load_dictionary = backend.c_load_dictionary - -# name to opaque C++ scope representation ------------------------------------ -_c_num_scopes = rffi.llexternal( - "cppyy_num_scopes", - [C_SCOPE], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_num_scopes(space, cppscope): - return _c_num_scopes(cppscope.handle) -_c_scope_name = rffi.llexternal( - "cppyy_scope_name", - [C_SCOPE, rffi.INT], rffi.CCHARP, - compilation_info = backend.eci) -def c_scope_name(space, cppscope, iscope): - return charp2str_free(space, _c_scope_name(cppscope.handle, iscope)) - -_c_resolve_name = rffi.llexternal( - "cppyy_resolve_name", - [rffi.CCHARP], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_resolve_name(space, name): - return charp2str_free(space, _c_resolve_name(name)) -_c_get_scope_opaque = rffi.llexternal( - "cppyy_get_scope", - [rffi.CCHARP], C_SCOPE, - 
releasegil=ts_reflect, - compilation_info=backend.eci) -def c_get_scope_opaque(space, name): - return _c_get_scope_opaque(name) -_c_actual_class = rffi.llexternal( - "cppyy_actual_class", - [C_TYPE, C_OBJECT], C_TYPE, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_actual_class(space, cppclass, cppobj): - return _c_actual_class(cppclass.handle, cppobj) - -# memory management ---------------------------------------------------------- -_c_allocate = rffi.llexternal( - "cppyy_allocate", - [C_TYPE], C_OBJECT, - releasegil=ts_memory, - compilation_info=backend.eci) -def c_allocate(space, cppclass): - return _c_allocate(cppclass.handle) -_c_deallocate = rffi.llexternal( - "cppyy_deallocate", - [C_TYPE, C_OBJECT], lltype.Void, - releasegil=ts_memory, - compilation_info=backend.eci) -def c_deallocate(space, cppclass, cppobject): - _c_deallocate(cppclass.handle, cppobject) -_c_destruct = rffi.llexternal( - "cppyy_destruct", - [C_TYPE, C_OBJECT], lltype.Void, - releasegil=ts_call, - compilation_info=backend.eci) -def c_destruct(space, cppclass, cppobject): - _c_destruct(cppclass.handle, cppobject) - -# method/function dispatching ------------------------------------------------ -_c_call_v = rffi.llexternal( - "cppyy_call_v", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], lltype.Void, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_v(space, cppmethod, cppobject, nargs, args): - _c_call_v(cppmethod, cppobject, nargs, args) -_c_call_b = rffi.llexternal( - "cppyy_call_b", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.UCHAR, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_b(space, cppmethod, cppobject, nargs, args): - return _c_call_b(cppmethod, cppobject, nargs, args) -_c_call_c = rffi.llexternal( - "cppyy_call_c", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CHAR, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_c(space, cppmethod, cppobject, nargs, args): - return _c_call_c(cppmethod, cppobject, 
nargs, args) -_c_call_h = rffi.llexternal( - "cppyy_call_h", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.SHORT, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_h(space, cppmethod, cppobject, nargs, args): - return _c_call_h(cppmethod, cppobject, nargs, args) -_c_call_i = rffi.llexternal( - "cppyy_call_i", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.INT, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_i(space, cppmethod, cppobject, nargs, args): - return _c_call_i(cppmethod, cppobject, nargs, args) -_c_call_l = rffi.llexternal( - "cppyy_call_l", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONG, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_l(space, cppmethod, cppobject, nargs, args): - return _c_call_l(cppmethod, cppobject, nargs, args) -_c_call_ll = rffi.llexternal( - "cppyy_call_ll", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGLONG, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_ll(space, cppmethod, cppobject, nargs, args): - return _c_call_ll(cppmethod, cppobject, nargs, args) -_c_call_f = rffi.llexternal( - "cppyy_call_f", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.FLOAT, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_f(space, cppmethod, cppobject, nargs, args): - return _c_call_f(cppmethod, cppobject, nargs, args) -_c_call_d = rffi.llexternal( - "cppyy_call_d", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.DOUBLE, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_d(space, cppmethod, cppobject, nargs, args): - return _c_call_d(cppmethod, cppobject, nargs, args) -_c_call_ld = rffi.llexternal( - "cppyy_call_ld", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGDOUBLE, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_ld(space, cppmethod, cppobject, nargs, args): - return _c_call_ld(cppmethod, cppobject, nargs, args) - -_c_call_r = rffi.llexternal( - "cppyy_call_r", - [C_METHOD, C_OBJECT, 
rffi.INT, rffi.VOIDP], rffi.VOIDP, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_r(space, cppmethod, cppobject, nargs, args): - return _c_call_r(cppmethod, cppobject, nargs, args) -_c_call_s = rffi.llexternal( - "cppyy_call_s", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, rffi.SIZE_TP], rffi.CCHARP, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_s(space, cppmethod, cppobject, nargs, args): - length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') - try: - cstr = _c_call_s(cppmethod, cppobject, nargs, args, length) - cstr_len = intmask(length[0]) - finally: - lltype.free(length, flavor='raw') - return cstr, cstr_len - -_c_constructor = rffi.llexternal( - "cppyy_constructor", - [C_METHOD, C_TYPE, rffi.INT, rffi.VOIDP], C_OBJECT, - releasegil=ts_call, - compilation_info=backend.eci) -def c_constructor(space, cppmethod, cppobject, nargs, args): - return _c_constructor(cppmethod, cppobject, nargs, args) -_c_call_o = rffi.llexternal( - "cppyy_call_o", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, C_TYPE], rffi.LONG, - releasegil=ts_call, - compilation_info=backend.eci) -def c_call_o(space, method, cppobj, nargs, args, cppclass): - return _c_call_o(method, cppobj, nargs, args, cppclass.handle) - -_c_get_function_address = rffi.llexternal( - "cppyy_get_function_address", - [C_SCOPE, C_INDEX], C_FUNC_PTR, - releasegil=ts_reflect, - compilation_info=backend.eci, - random_effects_on_gcobjs=False) -def c_get_function_address(space, cppscope, index): - return _c_get_function_address(cppscope.handle, index) - -# handling of function argument buffer --------------------------------------- -_c_allocate_function_args = rffi.llexternal( - "cppyy_allocate_function_args", - [rffi.SIZE_T], rffi.VOIDP, - releasegil=ts_memory, - compilation_info=backend.eci) -def c_allocate_function_args(space, size): - return _c_allocate_function_args(size) -_c_deallocate_function_args = rffi.llexternal( - "cppyy_deallocate_function_args", - [rffi.VOIDP], 
lltype.Void, - releasegil=ts_memory, - compilation_info=backend.eci) -def c_deallocate_function_args(space, args): - _c_deallocate_function_args(args) -_c_function_arg_sizeof = rffi.llexternal( - "cppyy_function_arg_sizeof", - [], rffi.SIZE_T, - releasegil=ts_memory, - compilation_info=backend.eci, - random_effects_on_gcobjs=False) - at jit.elidable -def c_function_arg_sizeof(space): - return _c_function_arg_sizeof() -_c_function_arg_typeoffset = rffi.llexternal( - "cppyy_function_arg_typeoffset", - [], rffi.SIZE_T, - releasegil=ts_memory, - compilation_info=backend.eci, - random_effects_on_gcobjs=False) - at jit.elidable -def c_function_arg_typeoffset(space): - return _c_function_arg_typeoffset() - -# scope reflection information ----------------------------------------------- -_c_is_namespace = rffi.llexternal( - "cppyy_is_namespace", - [C_SCOPE], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_is_namespace(space, scope): - return _c_is_namespace(scope) -_c_is_template = rffi.llexternal( - "cppyy_is_template", - [rffi.CCHARP], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_is_template(space, name): - return _c_is_template(name) -_c_is_abstract = rffi.llexternal( - "cppyy_is_abstract", - [C_SCOPE], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_is_abstract(space, cpptype): - return _c_is_abstract(cpptype) -_c_is_enum = rffi.llexternal( - "cppyy_is_enum", - [rffi.CCHARP], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_is_enum(space, name): - return _c_is_enum(name) - -# type/class reflection information ------------------------------------------ -_c_final_name = rffi.llexternal( - "cppyy_final_name", - [C_TYPE], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_final_name(space, cpptype): - return charp2str_free(space, _c_final_name(cpptype)) -_c_scoped_final_name = rffi.llexternal( - "cppyy_scoped_final_name", - [C_TYPE], 
rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_scoped_final_name(space, cpptype): - return charp2str_free(space, _c_scoped_final_name(cpptype)) -_c_has_complex_hierarchy = rffi.llexternal( - "cppyy_has_complex_hierarchy", - [C_TYPE], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_has_complex_hierarchy(space, cpptype): - return _c_has_complex_hierarchy(cpptype) -_c_num_bases = rffi.llexternal( - "cppyy_num_bases", - [C_TYPE], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_num_bases(space, cppclass): - return _c_num_bases(cppclass.handle) -_c_base_name = rffi.llexternal( - "cppyy_base_name", - [C_TYPE, rffi.INT], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_base_name(space, cppclass, base_index): - return charp2str_free(space, _c_base_name(cppclass.handle, base_index)) -_c_is_subtype = rffi.llexternal( - "cppyy_is_subtype", - [C_TYPE, C_TYPE], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci, - random_effects_on_gcobjs=False) - at jit.elidable -def c_is_subtype(space, derived, base): - if derived == base: - return 1 - return _c_is_subtype(derived.handle, base.handle) - -_c_base_offset = rffi.llexternal( - "cppyy_base_offset", - [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.LONG, # actually ptrdiff_t - releasegil=ts_reflect, - compilation_info=backend.eci, - random_effects_on_gcobjs=False) - at jit.elidable -def c_base_offset(space, derived, base, address, direction): - if derived == base: - return 0 - return _c_base_offset(derived.handle, base.handle, address, direction) -def c_base_offset1(space, derived_h, base, address, direction): - return _c_base_offset(derived_h, base.handle, address, direction) - -# method/function reflection information ------------------------------------- -_c_num_methods = rffi.llexternal( - "cppyy_num_methods", - [C_SCOPE], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def 
c_num_methods(space, cppscope): - return _c_num_methods(cppscope.handle) -_c_method_index_at = rffi.llexternal( - "cppyy_method_index_at", - [C_SCOPE, rffi.INT], C_INDEX, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_index_at(space, cppscope, imethod): - return _c_method_index_at(cppscope.handle, imethod) -_c_method_indices_from_name = rffi.llexternal( - "cppyy_method_indices_from_name", - [C_SCOPE, rffi.CCHARP], C_INDEX_ARRAY, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_indices_from_name(space, cppscope, name): - indices = _c_method_indices_from_name(cppscope.handle, name) - if not indices: - return [] - py_indices = [] - i = 0 - index = indices[i] - while index != -1: - i += 1 - py_indices.append(index) - index = indices[i] - c_free(space, rffi.cast(rffi.VOIDP, indices)) # c_free defined below - return py_indices - -_c_method_name = rffi.llexternal( - "cppyy_method_name", - [C_SCOPE, C_INDEX], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_name(space, cppscope, index): - return charp2str_free(space, _c_method_name(cppscope.handle, index)) -_c_method_result_type = rffi.llexternal( - "cppyy_method_result_type", - [C_SCOPE, C_INDEX], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_result_type(space, cppscope, index): - return charp2str_free(space, _c_method_result_type(cppscope.handle, index)) -_c_method_num_args = rffi.llexternal( - "cppyy_method_num_args", - [C_SCOPE, C_INDEX], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_num_args(space, cppscope, index): - return _c_method_num_args(cppscope.handle, index) -_c_method_req_args = rffi.llexternal( - "cppyy_method_req_args", - [C_SCOPE, C_INDEX], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_req_args(space, cppscope, index): - return _c_method_req_args(cppscope.handle, index) -_c_method_arg_type = rffi.llexternal( - 
"cppyy_method_arg_type", - [C_SCOPE, C_INDEX, rffi.INT], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_arg_type(space, cppscope, index, arg_index): - return charp2str_free(space, _c_method_arg_type(cppscope.handle, index, arg_index)) -_c_method_arg_default = rffi.llexternal( - "cppyy_method_arg_default", - [C_SCOPE, C_INDEX, rffi.INT], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_arg_default(space, cppscope, index, arg_index): - return charp2str_free(space, _c_method_arg_default(cppscope.handle, index, arg_index)) -_c_method_signature = rffi.llexternal( - "cppyy_method_signature", - [C_SCOPE, C_INDEX], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_signature(space, cppscope, index): - return charp2str_free(space, _c_method_signature(cppscope.handle, index)) - -_c_method_is_template = rffi.llexternal( - "cppyy_method_is_template", - [C_SCOPE, C_INDEX], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_method_is_template(space, cppscope, index): - return _c_method_is_template(cppscope.handle, index) -_c_method_num_template_args = rffi.llexternal( - "cppyy_method_num_template_args", - [C_SCOPE, C_INDEX], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -_c_method_template_arg_name = rffi.llexternal( - "cppyy_method_template_arg_name", - [C_SCOPE, C_INDEX, C_INDEX], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_template_args(space, cppscope, index): - nargs = _c_method_num_template_args(cppscope.handle, index) - args = [c_resolve_name(space, - charp2str_free(space, _c_method_template_arg_name(cppscope.handle, index, iarg))) - for iarg in range(nargs)] - return args - -_c_get_method = rffi.llexternal( - "cppyy_get_method", - [C_SCOPE, C_INDEX], C_METHOD, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_get_method(space, cppscope, index): - return 
_c_get_method(cppscope.handle, index) -_c_get_global_operator = rffi.llexternal( - "cppyy_get_global_operator", - [C_SCOPE, C_SCOPE, C_SCOPE, rffi.CCHARP], WLAVC_INDEX, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_get_global_operator(space, nss, lc, rc, op): - if nss is not None: - return _c_get_global_operator(nss.handle, lc.handle, rc.handle, op) - return rffi.cast(WLAVC_INDEX, -1) - -# method properties ---------------------------------------------------------- -_c_is_constructor = rffi.llexternal( - "cppyy_is_constructor", - [C_TYPE, C_INDEX], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_is_constructor(space, cppclass, index): - return _c_is_constructor(cppclass.handle, index) -_c_is_staticmethod = rffi.llexternal( - "cppyy_is_staticmethod", - [C_TYPE, C_INDEX], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_is_staticmethod(space, cppclass, index): - return _c_is_staticmethod(cppclass.handle, index) - -# data member reflection information ----------------------------------------- -_c_num_datamembers = rffi.llexternal( - "cppyy_num_datamembers", - [C_SCOPE], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_num_datamembers(space, cppscope): - return _c_num_datamembers(cppscope.handle) -_c_datamember_name = rffi.llexternal( - "cppyy_datamember_name", - [C_SCOPE, rffi.INT], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_datamember_name(space, cppscope, datamember_index): - return charp2str_free(space, _c_datamember_name(cppscope.handle, datamember_index)) -_c_datamember_type = rffi.llexternal( - "cppyy_datamember_type", - [C_SCOPE, rffi.INT], rffi.CCHARP, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_datamember_type(space, cppscope, datamember_index): - return charp2str_free(space, _c_datamember_type(cppscope.handle, datamember_index)) -_c_datamember_offset = rffi.llexternal( - "cppyy_datamember_offset", - [C_SCOPE, 
rffi.INT], rffi.LONG, # actually ptrdiff_t - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_datamember_offset(space, cppscope, datamember_index): - return _c_datamember_offset(cppscope.handle, datamember_index) - -_c_datamember_index = rffi.llexternal( - "cppyy_datamember_index", - [C_SCOPE, rffi.CCHARP], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_datamember_index(space, cppscope, name): - return _c_datamember_index(cppscope.handle, name) - -# data member properties ----------------------------------------------------- -_c_is_publicdata = rffi.llexternal( - "cppyy_is_publicdata", - [C_SCOPE, rffi.INT], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_is_publicdata(space, cppscope, datamember_index): - return _c_is_publicdata(cppscope.handle, datamember_index) -_c_is_staticdata = rffi.llexternal( - "cppyy_is_staticdata", - [C_SCOPE, rffi.INT], rffi.INT, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_is_staticdata(space, cppscope, datamember_index): - return _c_is_staticdata(cppscope.handle, datamember_index) - -# misc helpers --------------------------------------------------------------- -_c_strtoll = rffi.llexternal( - "cppyy_strtoll", - [rffi.CCHARP], rffi.LONGLONG, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_strtoll(space, svalue): - return _c_strtoll(svalue) -_c_strtoull = rffi.llexternal( - "cppyy_strtoull", - [rffi.CCHARP], rffi.ULONGLONG, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_strtoull(space, svalue): - return _c_strtoull(svalue) -_c_free = rffi.llexternal( - "cppyy_free", - [rffi.VOIDP], lltype.Void, - releasegil=ts_memory, - compilation_info=backend.eci) -def c_free(space, voidp): - return _c_free(voidp) - -def charp2str_free(space, charp): - string = rffi.charp2str(charp) - voidp = rffi.cast(rffi.VOIDP, charp) - _c_free(voidp) - return string - -_c_charp2stdstring = rffi.llexternal( - "cppyy_charp2stdstring", - [rffi.CCHARP, 
rffi.SIZE_T], C_OBJECT, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_charp2stdstring(space, pystr, sz): - with rffi.scoped_view_charp(pystr) as cstr: - cppstr = _c_charp2stdstring(cstr, sz) - return cppstr -_c_stdstring2stdstring = rffi.llexternal( - "cppyy_stdstring2stdstring", - [C_OBJECT], C_OBJECT, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_stdstring2stdstring(space, cppobject): - return _c_stdstring2stdstring(cppobject) - -_c_stdvector_valuetype = rffi.llexternal( - "cppyy_stdvector_valuetype", - [rffi.CCHARP], rffi.CCHARP, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_stdvector_valuetype(space, pystr): - cstr = rffi.str2charp(pystr) - result = _c_stdvector_valuetype(cstr) - rffi.free_charp(cstr) - if result: - return charp2str_free(space, result) - return "" -_c_stdvector_valuesize = rffi.llexternal( - "cppyy_stdvector_valuesize", - [rffi.CCHARP], rffi.SIZE_T, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_stdvector_valuesize(space, pystr): - cstr = rffi.str2charp(pystr) - result = _c_stdvector_valuesize(cstr) - rffi.free_charp(cstr) - return result diff --git a/pypy/module/_cppyy/capi/cling_capi.py b/pypy/module/_cppyy/capi/cling_capi.py deleted file mode 100644 --- a/pypy/module/_cppyy/capi/cling_capi.py +++ /dev/null @@ -1,196 +0,0 @@ -import py, os - -from pypy.objspace.std.iterobject import W_AbstractSeqIterObject - -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app - -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rarithmetic import intmask -from rpython.rlib import jit, libffi, rdynload - -from pypy.module._rawffi.array import W_ArrayInstance -from pypy.module._cppyy.capi.capi_types import C_OBJECT - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = 
pkgpath.join("src") -incpath = pkgpath.join("include") - -import commands -(config_stat, incdir) = commands.getstatusoutput("root-config --incdir") - -if os.environ.get("ROOTSYS"): - if config_stat != 0: # presumably Reflex-only - rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), - os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include"), - os.path.join(os.environ["ROOTSYS"], "include"),] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = [incdir] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - if config_stat == 0: - rootincpath = [incdir] - rootlibpath = commands.getoutput("root-config --libdir").split() - else: - rootincpath = [] - rootlibpath = [] - -def identify(): - return 'Cling' - -ts_reflect = False -ts_call = 'auto' -ts_memory = 'auto' -ts_helper = 'auto' - -std_string_name = 'std::basic_string' - -# force loading (and exposure) of libCore symbols -with rffi.scoped_str2charp('libCore.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) - -# require local translator path to pickup common defs -from rpython.translator import cdir -translator_c_dir = py.path.local(cdir) - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("clingcwrapper.cxx")], - include_dirs=[incpath, translator_c_dir] + rootincpath, - includes=["clingcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Cling"], - compile_extra=["-fno-strict-aliasing", "-std=c++11"], - use_cpp_linker=True, -) - -_c_load_dictionary = rffi.llexternal( - "cppyy_load_dictionary", - [rffi.CCHARP], rdynload.DLLHANDLE, - releasegil=False, - compilation_info=eci) - -def c_load_dictionary(name): - pch = _c_load_dictionary(name) - return pch - -_c_stdstring2charp = rffi.llexternal( - "cppyy_stdstring2charp", - [C_OBJECT, rffi.SIZE_TP], rffi.CCHARP, - releasegil=ts_helper, - compilation_info=eci) 
-def c_stdstring2charp(space, cppstr): - sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') - try: - cstr = _c_stdstring2charp(cppstr, sz) - cstr_len = intmask(sz[0]) - finally: - lltype.free(sz, flavor='raw') - return rffi.charpsize2str(cstr, cstr_len) - -# TODO: factor these out ... -# pythonizations - -# -# std::string behavior -def stdstring_c_str(space, w_self): - """Return a python string taking into account \0""" - - from pypy.module._cppyy import interp_cppyy - cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) - return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) - -# -# std::vector behavior -class W_STLVectorIter(W_AbstractSeqIterObject): - _immutable_fields_ = ['overload', 'len']#'data', 'converter', 'len', 'stride', 'vector'] - - def __init__(self, space, w_vector): - W_AbstractSeqIterObject.__init__(self, w_vector) - # TODO: this should live in rpythonize.py or something so that the - # imports can move to the top w/o getting circles - from pypy.module._cppyy import interp_cppyy - assert isinstance(w_vector, interp_cppyy.W_CPPInstance) - vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) - self.overload = vector.cppclass.get_overload("__getitem__") - - from pypy.module._cppyy import capi - v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) - v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) - - if not v_type or not v_size: - raise NotImplementedError # fallback on getitem - - w_arr = vector.cppclass.get_overload("data").call(w_vector, []) - arr = space.interp_w(W_ArrayInstance, w_arr, can_be_None=True) - if not arr: - raise OperationError(space.w_StopIteration, space.w_None) - - self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) - - from pypy.module._cppyy import converter - self.converter = converter.get_converter(space, v_type, '') - self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) - self.stride = v_size - - def 
descr_next(self, space): - if self.w_seq is None: - raise OperationError(space.w_StopIteration, space.w_None) - if self.len <= self.index: - self.w_seq = None - raise OperationError(space.w_StopIteration, space.w_None) - try: - from pypy.module._cppyy import capi # TODO: refector - offset = capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) - w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) - except OperationError as e: - self.w_seq = None - if not e.match(space, space.w_IndexError): - raise - raise OperationError(space.w_StopIteration, space.w_None) - self.index += 1 - return w_item - -def stdvector_iter(space, w_self): - return W_STLVectorIter(space, w_self) - -# setup pythonizations for later use at run-time -_pythonizations = {} -def register_pythonizations(space): - "NOT_RPYTHON" - - allfuncs = [ - - ### std::string - stdstring_c_str, - - ### std::vector - stdvector_iter, - - ] - - for f in allfuncs: - _pythonizations[f.__name__] = space.wrap(interp2app(f)) - -def _method_alias(space, w_pycppclass, m1, m2): - space.setattr(w_pycppclass, space.wrap(m1), - space.getattr(w_pycppclass, space.wrap(m2))) - -def pythonize(space, name, w_pycppclass): - if name == "string": - space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") - _method_alias(space, w_pycppclass, "__str__", "c_str") - - if "vector" in name[:11]: # len('std::vector') == 11 - from pypy.module._cppyy import capi - v_type = capi.c_stdvector_valuetype(space, name) - if v_type: - space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) - v_size = capi.c_stdvector_valuesize(space, name) - if v_size: - space.setattr(w_pycppclass, space.wrap("value_size"), space.wrap(v_size)) - space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["stdvector_iter"]) diff --git a/pypy/module/_cppyy/include/clingcwrapper.h 
b/pypy/module/_cppyy/include/clingcwrapper.h deleted file mode 100644 --- a/pypy/module/_cppyy/include/clingcwrapper.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef CPPYY_CLINGCWRAPPER -#define CPPYY_CLINGCWRAPPER - -#include "capi.h" - -#ifdef __cplusplus -extern "C" { -#endif // ifdef __cplusplus - - /* misc helpers */ - void* cppyy_load_dictionary(const char* lib_name); - -#ifdef __cplusplus -} -#endif // ifdef __cplusplus - -// TODO: pick up from llvm-config --cxxflags -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - -#ifndef __STDC_CONSTANT_MACROS -#define __STDC_CONSTANT_MACROS -#endif - -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS -#endif - -#ifndef __STDC_LIMIT_MACROS -#define __STDC_LIMIT_MACROS -#endif - -// Wrapper callback: except this to become available from Cling directly -typedef void (*CPPYY_Cling_Wrapper_t)(void*, int, void**, void*); - -#endif // ifndef CPPYY_CLINGCWRAPPER diff --git a/pypy/module/_cppyy/include/cpp_cppyy.h b/pypy/module/_cppyy/include/cpp_cppyy.h deleted file mode 100644 --- a/pypy/module/_cppyy/include/cpp_cppyy.h +++ /dev/null @@ -1,143 +0,0 @@ -#ifndef PYROOT_CPPYY_H -#define PYROOT_CPPYY_H - -// Standard -#include -#include -#include - -// ROOT types -typedef long Long_t; -typedef unsigned long ULong_t; -typedef long long Long64_t; -typedef unsigned long long ULong64_t; -typedef float Float_t; -typedef double Double_t; -typedef long double LongDouble_t; -typedef bool Bool_t; -typedef char Char_t; -typedef unsigned char UChar_t; -typedef short Short_t; -typedef unsigned short UShort_t; -typedef int Int_t; -typedef unsigned int UInt_t; - -namespace Cppyy { - typedef ptrdiff_t TCppScope_t; - typedef TCppScope_t TCppType_t; - typedef void* TCppObject_t; - typedef ptrdiff_t TCppMethod_t; - - typedef Long_t TCppIndex_t; - typedef void* TCppFuncAddr_t; - -// name to opaque C++ scope representation ----------------------------------- - TCppIndex_t GetNumScopes( TCppScope_t parent ); - std::string GetScopeName( 
TCppScope_t parent, TCppIndex_t iscope ); - std::string ResolveName( const std::string& cppitem_name ); - TCppScope_t GetScope( const std::string& scope_name ); - TCppType_t GetActualClass( TCppType_t klass, TCppObject_t obj ); - size_t SizeOf( TCppType_t klass ); - - Bool_t IsBuiltin( const std::string& type_name ); - Bool_t IsComplete( const std::string& type_name ); - - extern TCppScope_t gGlobalScope; // for fast access - -// memory management --------------------------------------------------------- - TCppObject_t Allocate( TCppType_t type ); - void Deallocate( TCppType_t type, TCppObject_t instance ); - TCppObject_t Construct( TCppType_t type ); - void Destruct( TCppType_t type, TCppObject_t instance ); - -// method/function dispatching ----------------------------------------------- - void CallV( TCppMethod_t method, TCppObject_t self, void* args ); - UChar_t CallB( TCppMethod_t method, TCppObject_t self, void* args ); - Char_t CallC( TCppMethod_t method, TCppObject_t self, void* args ); - Short_t CallH( TCppMethod_t method, TCppObject_t self, void* args ); - Int_t CallI( TCppMethod_t method, TCppObject_t self, void* args ); - Long_t CallL( TCppMethod_t method, TCppObject_t self, void* args ); - Long64_t CallLL( TCppMethod_t method, TCppObject_t self, void* args ); - Float_t CallF( TCppMethod_t method, TCppObject_t self, void* args ); - Double_t CallD( TCppMethod_t method, TCppObject_t self, void* args ); - LongDouble_t CallLD( TCppMethod_t method, TCppObject_t self, void* args ); - void* CallR( TCppMethod_t method, TCppObject_t self, void* args ); - Char_t* CallS( TCppMethod_t method, TCppObject_t self, void* args, size_t* length ); - TCppObject_t CallConstructor( TCppMethod_t method, TCppType_t type, void* args ); - void CallDestructor( TCppType_t type, TCppObject_t self ); - TCppObject_t CallO( TCppMethod_t method, TCppObject_t self, void* args, TCppType_t result_type ); - - TCppFuncAddr_t GetFunctionAddress( TCppScope_t scope, TCppIndex_t imeth ); - -// 
handling of function argument buffer -------------------------------------- - void* AllocateFunctionArgs( size_t nargs ); - void DeallocateFunctionArgs( void* args ); - size_t GetFunctionArgSizeof(); - size_t GetFunctionArgTypeoffset(); - -// scope reflection information ---------------------------------------------- - Bool_t IsNamespace( TCppScope_t scope ); - Bool_t IsTemplate( const std::string& template_name ); - Bool_t IsAbstract( TCppType_t type ); - Bool_t IsEnum( const std::string& type_name ); - -// class reflection information ---------------------------------------------- - std::string GetFinalName( TCppType_t type ); - std::string GetScopedFinalName( TCppType_t type ); - Bool_t HasComplexHierarchy( TCppType_t type ); - TCppIndex_t GetNumBases( TCppType_t type ); - std::string GetBaseName( TCppType_t type, TCppIndex_t ibase ); - Bool_t IsSubtype( TCppType_t derived, TCppType_t base ); - void AddSmartPtrType( const std::string& ); - Bool_t IsSmartPtr( const std::string& ); - -// calculate offsets between declared and actual type, up-cast: direction > 0; down-cast: direction < 0 - ptrdiff_t GetBaseOffset( - TCppType_t derived, TCppType_t base, TCppObject_t address, int direction, bool rerror = false ); - -// method/function reflection information ------------------------------------ - TCppIndex_t GetNumMethods( TCppScope_t scope ); - TCppIndex_t GetMethodIndexAt( TCppScope_t scope, TCppIndex_t imeth ); - std::vector< TCppMethod_t > GetMethodsFromName( TCppScope_t scope, const std::string& name ); - - TCppMethod_t GetMethod( TCppScope_t scope, TCppIndex_t imeth ); - - std::string GetMethodName( TCppMethod_t ); - std::string GetMethodResultType( TCppMethod_t ); - TCppIndex_t GetMethodNumArgs( TCppMethod_t ); - TCppIndex_t GetMethodReqArgs( TCppMethod_t ); - std::string GetMethodArgName( TCppMethod_t, int iarg ); - std::string GetMethodArgType( TCppMethod_t, int iarg ); - std::string GetMethodArgDefault( TCppMethod_t, int iarg ); - std::string 
GetMethodSignature( TCppScope_t scope, TCppIndex_t imeth ); - Bool_t IsConstMethod( TCppMethod_t ); - - Bool_t IsMethodTemplate( TCppMethod_t ); - TCppIndex_t GetMethodNumTemplateArgs( TCppScope_t scope, TCppIndex_t imeth ); - std::string GetMethodTemplateArgName( TCppScope_t scope, TCppIndex_t imeth, TCppIndex_t iarg ); - - TCppIndex_t GetGlobalOperator( - TCppType_t scope, TCppType_t lc, TCppScope_t rc, const std::string& op ); - -// method properties --------------------------------------------------------- - Bool_t IsConstructor( TCppMethod_t method ); - Bool_t IsPublicMethod( TCppMethod_t method ); - Bool_t IsStaticMethod( TCppMethod_t method ); - -// data member reflection information ---------------------------------------- - TCppIndex_t GetNumDatamembers( TCppScope_t scope ); - std::string GetDatamemberName( TCppScope_t scope, TCppIndex_t idata ); - std::string GetDatamemberType( TCppScope_t scope, TCppIndex_t idata ); - ptrdiff_t GetDatamemberOffset( TCppScope_t scope, TCppIndex_t idata ); - TCppIndex_t GetDatamemberIndex( TCppScope_t scope, const std::string& name ); - -// data member properties ---------------------------------------------------- - Bool_t IsPublicData( TCppScope_t scope, TCppIndex_t idata ); - Bool_t IsStaticData( TCppScope_t scope, TCppIndex_t idata ); - Bool_t IsConstData( TCppScope_t scope, TCppIndex_t idata ); - Bool_t IsEnumData( TCppScope_t scope, TCppIndex_t idata ); - Int_t GetDimensionSize( TCppScope_t scope, TCppIndex_t idata, int dimension ); - -} // namespace Cppyy - -#endif // ifndef PYROOT_CPPYY_H diff --git a/pypy/module/_cppyy/include/cppyy.h b/pypy/module/_cppyy/include/cppyy.h deleted file mode 100644 --- a/pypy/module/_cppyy/include/cppyy.h +++ /dev/null @@ -1,66 +0,0 @@ -#ifndef CPPYY_CPPYY -#define CPPYY_CPPYY - -#include "cpp_cppyy.h" - -#ifdef __cplusplus -struct CPPYY_G__DUMMY_FOR_CINT7 { -#else -typedef struct -#endif - void* fTypeName; - unsigned int fModifiers; -#ifdef __cplusplus -}; -#else -} 
CPPYY_G__DUMMY_FOR_CINT7; -#endif - -#ifdef __cplusplus -struct CPPYY_G__p2p { -#else -typedef struct { -#endif - long i; - int reftype; -#ifdef __cplusplus -}; -#else -} CPPYY_G__p2p; -#endif - - -#ifdef __cplusplus -struct CPPYY_G__value { -#else -typedef struct { -#endif - union { - double d; - long i; /* used to be int */ - struct CPPYY_G__p2p reftype; - char ch; - short sh; - int in; - float fl; - unsigned char uch; - unsigned short ush; - unsigned int uin; - unsigned long ulo; - long long ll; - unsigned long long ull; - long double ld; - } obj; - long ref; - int type; - int tagnum; - int typenum; - char isconst; - struct CPPYY_G__DUMMY_FOR_CINT7 dummyForCint7; -#ifdef __cplusplus -}; -#else -} CPPYY_G__value; -#endif - -#endif // CPPYY_CPPYY diff --git a/pypy/module/_cppyy/src/callcontext.h b/pypy/module/_cppyy/src/callcontext.h deleted file mode 100644 --- a/pypy/module/_cppyy/src/callcontext.h +++ /dev/null @@ -1,101 +0,0 @@ -#ifndef PYROOT_TCALLCONTEXT_H -#define PYROOT_TCALLCONTEXT_H - -// Standard -#include - -//Bindings -#include "cpp_cppyy.h" - -//ROOT -#include "Rtypes.h" - -namespace PyROOT { - -// general place holder for function parameters - struct TParameter { - union Value { - Bool_t fBool; - Short_t fShort; - UShort_t fUShort; - Int_t fInt; - UInt_t fUInt; - Long_t fLong; - ULong_t fULong; - Long64_t fLongLong; - ULong64_t fULongLong; - Float_t fFloat; - Double_t fDouble; - LongDouble_t fLongDouble; - void* fVoidp; - } fValue; - void* fRef; - char fTypeCode; - }; - -// extra call information - struct TCallContext { - TCallContext( std::vector< TParameter >::size_type sz = 0 ) : fArgs( sz ), fFlags( 0 ) {} - - enum ECallFlags { - kNone = 0, - kIsSorted = 1, // if method overload priority determined - kIsCreator = 2, // if method creates python-owned objects - kIsConstructor = 4, // if method is a C++ constructor - kUseHeuristics = 8, // if method applies heuristics memory policy - kUseStrict = 16, // if method applies strict memory policy - 
kManageSmartPtr = 32, // if executor should manage smart pointers - kReleaseGIL = 64, // if method should release the GIL - kFast = 128, // if method should NOT handle signals - kSafe = 256 // if method should return on signals - }; - - // memory handling - static ECallFlags sMemoryPolicy; - static Bool_t SetMemoryPolicy( ECallFlags e ); - - // signal safety - static ECallFlags sSignalPolicy; - static Bool_t SetSignalPolicy( ECallFlags e ); - - // payload - std::vector< TParameter > fArgs; - UInt_t fFlags; - }; - - inline Bool_t IsSorted( UInt_t flags ) { - return flags & TCallContext::kIsSorted; - } - - inline Bool_t IsCreator( UInt_t flags ) { - return flags & TCallContext::kIsCreator; - } - - inline Bool_t IsConstructor( UInt_t flags ) { - return flags & TCallContext::kIsConstructor; - } - - inline Bool_t ManagesSmartPtr( TCallContext* ctxt ) { - return ctxt->fFlags & TCallContext::kManageSmartPtr; - } - - inline Bool_t ReleasesGIL( UInt_t flags ) { - return flags & TCallContext::kReleaseGIL; - } - - inline Bool_t ReleasesGIL( TCallContext* ctxt ) { - return ctxt ? 
(ctxt->fFlags & TCallContext::kReleaseGIL) : kFALSE; - } - - inline Bool_t UseStrictOwnership( TCallContext* ctxt ) { - if ( ctxt && (ctxt->fFlags & TCallContext::kUseStrict) ) - return kTRUE; - if ( ctxt && (ctxt->fFlags & TCallContext::kUseHeuristics) ) - return kFALSE; - - return TCallContext::sMemoryPolicy == TCallContext::kUseStrict; - } - -} // namespace PyROOT - -#endif // !PYROOT_TCALLCONTEXT_H diff --git a/pypy/module/_cppyy/src/clingcwrapper.cxx b/pypy/module/_cppyy/src/clingcwrapper.cxx deleted file mode 100644 --- a/pypy/module/_cppyy/src/clingcwrapper.cxx +++ /dev/null @@ -1,1569 +0,0 @@ -// Bindings -#include "capi.h" -#include "cpp_cppyy.h" -#include "callcontext.h" - -// ROOT -#include "TBaseClass.h" -#include "TClass.h" -#include "TClassRef.h" -#include "TClassTable.h" -#include "TClassEdit.h" -#include "TCollection.h" -#include "TDataMember.h" -#include "TDataType.h" -#include "TError.h" -#include "TFunction.h" -#include "TGlobal.h" -#include "TInterpreter.h" -#include "TList.h" -#include "TMethod.h" -#include "TMethodArg.h" -#include "TROOT.h" -#include "TSystem.h" - -// Standard -#include -#include // for std::count -#include -#include -#include -#include -#include // for getenv - -// temp -#include -typedef PyROOT::TParameter TParameter; -// --temp - - -// small number that allows use of stack for argument passing -const int SMALL_ARGS_N = 8; - -// data for life time management --------------------------------------------- -typedef std::vector< TClassRef > ClassRefs_t; -static ClassRefs_t g_classrefs( 1 ); -static const ClassRefs_t::size_type GLOBAL_HANDLE = 1; - -typedef std::map< std::string, ClassRefs_t::size_type > Name2ClassRefIndex_t; -static Name2ClassRefIndex_t g_name2classrefidx; - -typedef std::map< Cppyy::TCppMethod_t, CallFunc_t* > Method2CallFunc_t; -static Method2CallFunc_t g_method2callfunc; - -typedef std::vector< TGlobal* > GlobalVars_t; -static GlobalVars_t g_globalvars; - -// data 
---------------------------------------------------------------------- -Cppyy::TCppScope_t Cppyy::gGlobalScope = GLOBAL_HANDLE; - -// smart pointer types -static std::set< std::string > gSmartPtrTypes = - { "auto_ptr", "shared_ptr", "weak_ptr", "unique_ptr" }; - -// configuration -static bool gEnableFastPath = true; - - -// global initialization ----------------------------------------------------- -namespace { - -class ApplicationStarter { -public: - ApplicationStarter() { - // setup dummy holders for global and std namespaces - assert( g_classrefs.size() == GLOBAL_HANDLE ); - g_name2classrefidx[ "" ] = GLOBAL_HANDLE; - g_classrefs.push_back(TClassRef("")); - // aliases for std (setup already in pythonify) - g_name2classrefidx[ "std" ] = GLOBAL_HANDLE+1; - g_name2classrefidx[ "::std" ] = GLOBAL_HANDLE+1; - g_classrefs.push_back(TClassRef("std")); - // add a dummy global to refer to as null at index 0 - g_globalvars.push_back( nullptr ); - // disable fast path if requested - if (getenv("CPPYY_DISABLE_FASTPATH")) gEnableFastPath = false; - } - - ~ApplicationStarter() { - for ( auto ifunc : g_method2callfunc ) - gInterpreter->CallFunc_Delete( ifunc.second ); - } -} _applicationStarter; - -} // unnamed namespace - -// local helpers ------------------------------------------------------------- -static inline -TClassRef& type_from_handle( Cppyy::TCppScope_t scope ) -{ - assert( (ClassRefs_t::size_type) scope < g_classrefs.size() ); - return g_classrefs[ (ClassRefs_t::size_type)scope ]; -} - -// type_from_handle to go here -static inline -TFunction* type_get_method( Cppyy::TCppType_t klass, Cppyy::TCppIndex_t idx ) -{ - TClassRef& cr = type_from_handle( klass ); - if ( cr.GetClass() ) - return (TFunction*)cr->GetListOfMethods()->At( idx ); - assert( klass == (Cppyy::TCppType_t)GLOBAL_HANDLE ); - return (TFunction*)idx; -} - -static inline -Cppyy::TCppScope_t declaring_scope( Cppyy::TCppMethod_t method ) -{ - TMethod* m = dynamic_cast( (TFunction*)method ); - if ( m ) 
return Cppyy::GetScope( m->GetClass()->GetName() ); - return (Cppyy::TCppScope_t)GLOBAL_HANDLE; -} - -static inline -char* cppstring_to_cstring( const std::string& cppstr ) { - char* cstr = (char*)malloc( cppstr.size() + 1 ); - memcpy( cstr, cppstr.c_str(), cppstr.size() + 1 ); - return cstr; -} - - -// name to opaque C++ scope representation ----------------------------------- -Cppyy::TCppIndex_t Cppyy::GetNumScopes( TCppScope_t scope ) -{ - TClassRef& cr = type_from_handle( scope ); - if ( cr.GetClass() ) { - // this is expensive, but this function is only ever called for __dir__ - // TODO: rewrite __dir__ on the C++ side for a single loop - std::string s = GetFinalName( scope ); s += "::"; - gClassTable->Init(); - const int N = gClassTable->Classes(); - int total = 0; - for ( int i = 0; i < N; ++i ) { - if ( strncmp( gClassTable->Next(), s.c_str(), s.size() ) == 0 ) - total += 1; - } - return total; - } - assert( scope == (TCppScope_t)GLOBAL_HANDLE ); - return gClassTable->Classes(); -} - -std::string Cppyy::GetScopeName( TCppScope_t parent, TCppIndex_t iscope ) -{ -// Retrieve the scope name of the scope indexed with iscope in parent. 
- TClassRef& cr = type_from_handle( parent ); - if ( cr.GetClass() ) { - // this is expensive (quadratic in number of classes), but only ever called for __dir__ - // TODO: rewrite __dir__ on the C++ side for a single loop From pypy.commits at gmail.com Mon Jul 31 20:07:51 2017 From: pypy.commits at gmail.com (wlav) Date: Mon, 31 Jul 2017 17:07:51 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: remove old genreflex method ptr patch (not needed with Cling) Message-ID: <597fc657.a18ddf0a.9b9ef.98ed@mx.google.com> Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r92009:f141497a5252 Date: 2017-07-31 16:49 -0700 http://bitbucket.org/pypy/pypy/changeset/f141497a5252/ Log: remove old genreflex method ptr patch (not needed with Cling) diff --git a/pypy/module/_cppyy/genreflex-methptrgetter.patch b/pypy/module/_cppyy/genreflex-methptrgetter.patch deleted file mode 100644 --- a/pypy/module/_cppyy/genreflex-methptrgetter.patch +++ /dev/null @@ -1,125 +0,0 @@ -Index: cint/reflex/python/genreflex/gendict.py -=================================================================== ---- cint/reflex/python/genreflex/gendict.py (revision 43705) -+++ cint/reflex/python/genreflex/gendict.py (working copy) -@@ -52,6 +52,7 @@ - self.typedefs_for_usr = [] - self.gccxmlvers = gccxmlvers - self.split = opts.get('split', '') -+ self.with_methptrgetter = opts.get('with_methptrgetter', False) - # The next is to avoid a known problem with gccxml that it generates a - # references to id equal '_0' which is not defined anywhere - self.xref['_0'] = {'elem':'Unknown', 'attrs':{'id':'_0','name':''}, 'subelems':[]} -@@ -1328,6 +1329,8 @@ - bases = self.getBases( attrs['id'] ) - if inner and attrs.has_key('demangled') and self.isUnnamedType(attrs['demangled']) : - cls = attrs['demangled'] -+ if self.xref[attrs['id']]['elem'] == 'Union': -+ return 80*' ' - clt = '' - else: - cls = self.genTypeName(attrs['id'],const=True,colon=True) -@@ -1365,7 +1368,7 @@ - # Inner class/struct/union/enum. 
- for m in memList : - member = self.xref[m] -- if member['elem'] in ('Class','Struct','Union','Enumeration') \ -+ if member['elem'] in ('Class','Struct','Enumeration') \ - and member['attrs'].get('access') in ('private','protected') \ - and not self.isUnnamedType(member['attrs'].get('demangled')): - cmem = self.genTypeName(member['attrs']['id'],const=True,colon=True) -@@ -2003,8 +2006,15 @@ - else : params = '0' - s = ' .AddFunctionMember(%s, Reflex::Literal("%s"), %s%s, 0, %s, %s)' % (self.genTypeID(id), name, type, id, params, mod) - s += self.genCommentProperty(attrs) -+ s += self.genMethPtrGetterProperty(type, attrs) - return s - #---------------------------------------------------------------------------------- -+ def genMethPtrGetterProperty(self, type, attrs): -+ funcname = self.nameOfMethPtrGetter(type, attrs) -+ if funcname is None: -+ return '' -+ return '\n .AddProperty("MethPtrGetter", (void*)%s)' % funcname -+#---------------------------------------------------------------------------------- - def genMCODef(self, type, name, attrs, args): - id = attrs['id'] - cl = self.genTypeName(attrs['context'],colon=True) -@@ -2071,8 +2081,44 @@ - if returns == 'void' : body += ' }\n' - else : body += ' }\n' - body += '}\n' -- return head + body; -+ methptrgetter = self.genMethPtrGetter(type, name, attrs, args) -+ return head + body + methptrgetter - #---------------------------------------------------------------------------------- -+ def nameOfMethPtrGetter(self, type, attrs): -+ id = attrs['id'] -+ if self.with_methptrgetter and 'static' not in attrs and type in ('operator', 'method'): -+ return '%s%s_methptrgetter' % (type, id) -+ return None -+#---------------------------------------------------------------------------------- -+ def genMethPtrGetter(self, type, name, attrs, args): -+ funcname = self.nameOfMethPtrGetter(type, attrs) -+ if funcname is None: -+ return '' -+ id = attrs['id'] -+ cl = self.genTypeName(attrs['context'],colon=True) -+ rettype = 
self.genTypeName(attrs['returns'],enum=True, const=True, colon=True) -+ arg_type_list = [self.genTypeName(arg['type'], colon=True) for arg in args] -+ constness = attrs.get('const', 0) and 'const' or '' -+ lines = [] -+ a = lines.append -+ a('static void* %s(void* o)' % (funcname,)) -+ a('{') -+ if name == 'EmitVA': -+ # TODO: this is for ROOT TQObject, the problem being that ellipses is not -+ # exposed in the arguments and that makes the generated code fail if the named -+ # method is overloaded as is with TQObject::EmitVA -+ a(' return (void*)0;') -+ else: -+ # declare a variable "meth" which is a member pointer -+ a(' %s (%s::*meth)(%s)%s;' % (rettype, cl, ', '.join(arg_type_list), constness)) -+ a(' meth = (%s (%s::*)(%s)%s)&%s::%s;' % \ -+ (rettype, cl, ', '.join(arg_type_list), constness, cl, name)) -+ a(' %s* obj = (%s*)o;' % (cl, cl)) -+ a(' return (void*)(obj->*meth);') -+ a('}') -+ return '\n'.join(lines) -+ -+#---------------------------------------------------------------------------------- - def getDefaultArgs(self, args): - n = 0 - for a in args : -Index: cint/reflex/python/genreflex/genreflex.py -=================================================================== ---- cint/reflex/python/genreflex/genreflex.py (revision 43705) -+++ cint/reflex/python/genreflex/genreflex.py (working copy) -@@ -108,6 +108,10 @@ - Print extra debug information while processing. Keep intermediate files\n - --quiet - Do not print informational messages\n -+ --with-methptrgetter -+ Add the property MethPtrGetter to every FunctionMember. It contains a pointer to a -+ function which you can call to get the actual function pointer of the method that it's -+ stored in the vtable. It works only with gcc. 
- -h, --help - Print this help\n - """ -@@ -128,7 +132,7 @@ - ['help','debug=', 'output=','selection_file=','pool','dataonly','interpreteronly','deep','gccxmlpath=', - 'capabilities=','rootmap=','rootmap-lib=','comments','iocomments','no_membertypedefs', - 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', -- 'library=']) -+ 'library=', 'with-methptrgetter']) - except getopt.GetoptError, e: - print "--->> genreflex: ERROR:",e - self.usage(2) -@@ -187,6 +191,8 @@ - self.rootmap = a - if o in ('--rootmap-lib',): - self.rootmaplib = a -+ if o in ('--with-methptrgetter',): -+ self.opts['with_methptrgetter'] = True - if o in ('-I', '-U', '-D', '-P', '-C') : - # escape quotes; we need to use " because of windows cmd - poseq = a.find('=') From pypy.commits at gmail.com Mon Jul 31 20:07:53 2017 From: pypy.commits at gmail.com (wlav) Date: Mon, 31 Jul 2017 17:07:53 -0700 (PDT) Subject: [pypy-commit] pypy cppyy-packaging: remove benchmarking code Message-ID: <597fc659.10841c0a.af007.048d@mx.google.com> Author: Wim Lavrijsen Branch: cppyy-packaging Changeset: r92010:74ed34cfb42c Date: 2017-07-31 16:51 -0700 http://bitbucket.org/pypy/pypy/changeset/74ed34cfb42c/ Log: remove benchmarking code diff --git a/pypy/module/_cppyy/bench/Makefile b/pypy/module/_cppyy/bench/Makefile deleted file mode 100644 --- a/pypy/module/_cppyy/bench/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -all: bench02Dict_reflex.so - -ROOTSYS := ${ROOTSYS} - -ifeq ($(ROOTSYS),) - genreflex=genreflex - cppflags= -else - genreflex=$(ROOTSYS)/bin/genreflex - cppflags=-I$(ROOTSYS)/include -L$(ROOTSYS)/lib -endif - -PLATFORM := $(shell uname -s) -ifeq ($(PLATFORM),Darwin) - cppflags+=-dynamiclib -single_module -arch x86_64 -endif - -ifeq ($(shell $(genreflex) --help | grep -- --with-methptrgetter),) - genreflexflags= - cppflags2=-O3 -fPIC -else - genreflexflags=--with-methptrgetter - cppflags2=-Wno-pmf-conversions -O3 -fPIC -endif - - -bench02Dict_reflex.so: bench02.h 
bench02.cxx bench02.xml - $(genreflex) bench02.h $(genreflexflags) --selection=bench02.xml -I$(ROOTSYS)/include - g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -std=c++11 -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) diff --git a/pypy/module/_cppyy/bench/bench02.cxx b/pypy/module/_cppyy/bench/bench02.cxx deleted file mode 100644 --- a/pypy/module/_cppyy/bench/bench02.cxx +++ /dev/null @@ -1,79 +0,0 @@ -#include "bench02.h" - -#include "TROOT.h" -#include "TApplication.h" -#include "TDirectory.h" -#include "TInterpreter.h" -#include "TSystem.h" -#include "TBenchmark.h" -#include "TStyle.h" -#include "TError.h" -#include "Getline.h" -#include "TVirtualX.h" - -#include "Api.h" - -#include - -TClass *TClass::GetClass(const char*, Bool_t, Bool_t) { - static TClass* dummy = new TClass("__dummy__", kTRUE); - return dummy; // is deleted by gROOT at shutdown -} - -class TTestApplication : public TApplication { -public: - TTestApplication( - const char* acn, Int_t* argc, char** argv, Bool_t bLoadLibs = kTRUE); - virtual ~TTestApplication(); -}; - -TTestApplication::TTestApplication( - const char* acn, int* argc, char** argv, bool do_load) : TApplication(acn, argc, argv) { - if (do_load) { - // follow TRint to minimize differences with CINT - ProcessLine("#include ", kTRUE); - ProcessLine("#include <_string>", kTRUE); // for std::string iostream. 
- ProcessLine("#include ", kTRUE); // needed because they're used within the - ProcessLine("#include ", kTRUE); // core ROOT dicts and CINT won't be able - // to properly unload these files - } - - // save current interpreter context - gInterpreter->SaveContext(); - gInterpreter->SaveGlobalsContext(); - - // prevent crashes on accessing history - Gl_histinit((char*)"-"); - - // prevent ROOT from exiting python - SetReturnFromRun(kTRUE); -} - -TTestApplication::~TTestApplication() {} - -static const char* appname = "pypy-cppyy"; - -Bench02RootApp::Bench02RootApp() { - gROOT->SetBatch(kTRUE); - if (!gApplication) { - int argc = 1; - char* argv[1]; argv[0] = (char*)appname; - gApplication = new TTestApplication(appname, &argc, argv, kFALSE); - } -} - -Bench02RootApp::~Bench02RootApp() { - // TODO: ROOT globals cleanup ... (?) -} - -void Bench02RootApp::report() { - std::cout << "gROOT is: " << gROOT << std::endl; - std::cout << "gApplication is: " << gApplication << std::endl; -} - -void Bench02RootApp::close_file(TFile* f) { - std::cout << "closing file " << f->GetName() << " ... " << std::endl; - f->Write(); - f->Close(); - std::cout << "... 
file closed" << std::endl; -} diff --git a/pypy/module/_cppyy/bench/bench02.h b/pypy/module/_cppyy/bench/bench02.h deleted file mode 100644 --- a/pypy/module/_cppyy/bench/bench02.h +++ /dev/null @@ -1,72 +0,0 @@ -#include "TString.h" - -#include "TCanvas.h" -#include "TFile.h" -#include "TProfile.h" -#include "TNtuple.h" -#include "TH1F.h" -#include "TH2F.h" -#include "TRandom.h" -#include "TRandom3.h" - -#include "TROOT.h" -#include "TApplication.h" -#include "TSystem.h" - -#include "TArchiveFile.h" -#include "TBasket.h" -#include "TBenchmark.h" -#include "TBox.h" -#include "TBranchRef.h" -#include "TBrowser.h" -#include "TClassGenerator.h" -#include "TClassRef.h" -#include "TClassStreamer.h" -#include "TContextMenu.h" -#include "TEntryList.h" -#include "TEventList.h" -#include "TF1.h" -#include "TFileCacheRead.h" -#include "TFileCacheWrite.h" -#include "TFileMergeInfo.h" -#include "TFitResult.h" -#include "TFolder.h" -//#include "TFormulaPrimitive.h" -#include "TFunction.h" -#include "TFrame.h" -#include "TGlobal.h" -#include "THashList.h" -#include "TInetAddress.h" -#include "TInterpreter.h" -#include "TKey.h" -#include "TLegend.h" -#include "TMethodCall.h" -#include "TPluginManager.h" -#include "TProcessUUID.h" -#include "TSchemaRuleSet.h" -#include "TStyle.h" -#include "TSysEvtHandler.h" -#include "TTimer.h" -#include "TView.h" -//#include "TVirtualCollectionProxy.h" -#include "TVirtualFFT.h" -#include "TVirtualHistPainter.h" -#include "TVirtualIndex.h" -#include "TVirtualIsAProxy.h" -#include "TVirtualPadPainter.h" -#include "TVirtualRefProxy.h" -#include "TVirtualStreamerInfo.h" -#include "TVirtualViewer3D.h" - -#include -#include - - -class Bench02RootApp { -public: - Bench02RootApp(); - ~Bench02RootApp(); - - void report(); - void close_file(TFile* f); -}; diff --git a/pypy/module/_cppyy/bench/bench02.xml b/pypy/module/_cppyy/bench/bench02.xml deleted file mode 100644 --- a/pypy/module/_cppyy/bench/bench02.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/pypy/module/_cppyy/bench/hsimple.C b/pypy/module/_cppyy/bench/hsimple.C deleted file mode 100644 --- a/pypy/module/_cppyy/bench/hsimple.C +++ /dev/null @@ -1,109 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -TFile *hsimple(Int_t get=0) -{ -// This program creates : -// - a one dimensional histogram -// - a two dimensional histogram -// - a profile histogram -// - a memory-resident ntuple -// -// These objects are filled with some random numbers and saved on a file. -// If get=1 the macro returns a pointer to the TFile of "hsimple.root" -// if this file exists, otherwise it is created. -// The file "hsimple.root" is created in $ROOTSYS/tutorials if the caller has -// write access to this directory, otherwise the file is created in $PWD - - TString filename = "hsimple.root"; - TString dir = gSystem->UnixPathName(gInterpreter->GetCurrentMacroName()); - dir.ReplaceAll("hsimple.C",""); - dir.ReplaceAll("/./","/"); - TFile *hfile = 0; - if (get) { - // if the argument get =1 return the file "hsimple.root" - // if the file does not exist, it is created - TString fullPath = dir+"hsimple.root"; - if (!gSystem->AccessPathName(fullPath,kFileExists)) { - hfile = TFile::Open(fullPath); //in $ROOTSYS/tutorials - if (hfile) return hfile; - } - //otherwise try $PWD/hsimple.root - if (!gSystem->AccessPathName("hsimple.root",kFileExists)) { - hfile = TFile::Open("hsimple.root"); //in current dir - if (hfile) return hfile; - } - } - //no hsimple.root file found. Must generate it ! 
- //generate hsimple.root in $ROOTSYS/tutorials if we have write access - if (!gSystem->AccessPathName(dir,kWritePermission)) { - filename = dir+"hsimple.root"; - } else if (!gSystem->AccessPathName(".",kWritePermission)) { - //otherwise generate hsimple.root in the current directory - } else { - printf("you must run the script in a directory with write access\n"); - return 0; - } - hfile = (TFile*)gROOT->FindObject(filename); if (hfile) hfile->Close(); - hfile = new TFile(filename,"RECREATE","Demo ROOT file with histograms"); - - // Create some histograms, a profile histogram and an ntuple - TH1F *hpx = new TH1F("hpx","This is the px distribution",100,-4,4); - hpx->SetFillColor(48); - TH2F *hpxpy = new TH2F("hpxpy","py vs px",40,-4,4,40,-4,4); - TProfile *hprof = new TProfile("hprof","Profile of pz versus px",100,-4,4,0,20); - TNtuple *ntuple = new TNtuple("ntuple","Demo ntuple","px:py:pz:random:i"); - - gBenchmark->Start("hsimple"); - - // Create a new canvas. - TCanvas *c1 = new TCanvas("c1","Dynamic Filling Example",200,10,700,500); - c1->SetFillColor(42); - c1->GetFrame()->SetFillColor(21); - c1->GetFrame()->SetBorderSize(6); - c1->GetFrame()->SetBorderMode(-1); - - - // Fill histograms randomly - TRandom3 random; - Float_t px, py, pz; - const Int_t kUPDATE = 1000; - for (Int_t i = 0; i < 50000; i++) { - // random.Rannor(px,py); - px = random.Gaus(0, 1); - py = random.Gaus(0, 1); - pz = px*px + py*py; - Float_t rnd = random.Rndm(1); - hpx->Fill(px); - hpxpy->Fill(px,py); - hprof->Fill(px,pz); - ntuple->Fill(px,py,pz,rnd,i); - if (i && (i%kUPDATE) == 0) { - if (i == kUPDATE) hpx->Draw(); - c1->Modified(); - c1->Update(); - if (gSystem->ProcessEvents()) - break; - } - } - gBenchmark->Show("hsimple"); - - // Save all objects in this file - hpx->SetFillColor(0); - hfile->Write(); - hpx->SetFillColor(48); - c1->Modified(); - return hfile; - -// Note that the file is automatically close when application terminates -// or when the file destructor is called. 
-} diff --git a/pypy/module/_cppyy/bench/hsimple.py b/pypy/module/_cppyy/bench/hsimple.py deleted file mode 100755 --- a/pypy/module/_cppyy/bench/hsimple.py +++ /dev/null @@ -1,110 +0,0 @@ -#*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* -#*-* -#*-* This program creates : -#*-* - a one dimensional histogram -#*-* - a two dimensional histogram -#*-* - a profile histogram -#*-* - a memory-resident ntuple -#*-* -#*-* These objects are filled with some random numbers and saved on a file. -#*-* -#*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* - -_reflex = True # to keep things equal, set to False for full macro - -try: - import cppyy, random - - if not hasattr(cppyy.gbl, 'gROOT'): - cppyy.load_reflection_info('bench02Dict_reflex.so') - _reflex = True - - TCanvas = cppyy.gbl.TCanvas - TFile = cppyy.gbl.TFile - TProfile = cppyy.gbl.TProfile - TNtuple = cppyy.gbl.TNtuple - TH1F = cppyy.gbl.TH1F - TH2F = cppyy.gbl.TH2F - TRandom3 = cppyy.gbl.TRandom3 - - gROOT = cppyy.gbl.gROOT - gBenchmark = cppyy.gbl.TBenchmark() - gSystem = cppyy.gbl.gSystem - -except ImportError: - from ROOT import TCanvas, TFile, TProfile, TNtuple, TH1F, TH2F, TRandom3 - from ROOT import gROOT, gBenchmark, gSystem - import random - -if _reflex: - gROOT.SetBatch(True) - -# Create a new ROOT binary machine independent file. -# Note that this file may contain any kind of ROOT objects, histograms, -# pictures, graphics objects, detector geometries, tracks, events, etc.. -# This file is now becoming the current directory. 
- -if not _reflex: - hfile = gROOT.FindObject('hsimple.root') - if hfile: - hfile.Close() - hfile = TFile('hsimple.root', 'RECREATE', 'Demo ROOT file with histograms' ) - -# Create some histograms, a profile histogram and an ntuple -hpx = TH1F('hpx', 'This is the px distribution', 100, -4, 4) -hpx.SetFillColor(48) -hpxpy = TH2F('hpxpy', 'py vs px', 40, -4, 4, 40, -4, 4) -hprof = TProfile('hprof', 'Profile of pz versus px', 100, -4, 4, 0, 20) -if not _reflex: - ntuple = TNtuple('ntuple', 'Demo ntuple', 'px:py:pz:random:i') - -gBenchmark.Start('hsimple') - -# Create a new canvas, and customize it. -c1 = TCanvas('c1', 'Dynamic Filling Example', 200, 10, 700, 500) -c1.SetFillColor(42) -c1.GetFrame().SetFillColor(21) -c1.GetFrame().SetBorderSize(6) -c1.GetFrame().SetBorderMode(-1) - -# Fill histograms randomly. -random = TRandom3() -kUPDATE = 1000 -for i in xrange(50000): - # Generate random numbers -# px, py = random.gauss(0, 1), random.gauss(0, 1) - px, py = random.Gaus(0, 1), random.Gaus(0, 1) - pz = px*px + py*py -# rnd = random.random() - rnd = random.Rndm(1) - - # Fill histograms - hpx.Fill(px) - hpxpy.Fill(px, py) - hprof.Fill(px, pz) - if not _reflex: - ntuple.Fill(px, py, pz, rnd, i) - - # Update display every kUPDATE events - if i and i%kUPDATE == 0: - if i == kUPDATE: - hpx.Draw() - - c1.Modified(True) - c1.Update() - - if gSystem.ProcessEvents(): # allow user interrupt - break - -gBenchmark.Show( 'hsimple' ) - -# Save all objects in this file -hpx.SetFillColor(0) -if not _reflex: - hfile.Write() -hpx.SetFillColor(48) -c1.Modified(True) -c1.Update() - -# Note that the file is automatically closed when application terminates -# or when the file destructor is called. 
diff --git a/pypy/module/_cppyy/bench/hsimple_rflx.py b/pypy/module/_cppyy/bench/hsimple_rflx.py deleted file mode 100755 --- a/pypy/module/_cppyy/bench/hsimple_rflx.py +++ /dev/null @@ -1,120 +0,0 @@ -#*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* -#*-* -#*-* This program creates : -#*-* - a one dimensional histogram -#*-* - a two dimensional histogram -#*-* - a profile histogram -#*-* - a memory-resident ntuple -#*-* -#*-* These objects are filled with some random numbers and saved on a file. -#*-* -#*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* - -try: - import warnings - warnings.simplefilter("ignore") - - import cppyy, random - cppyy.load_reflection_info('bench02Dict_reflex.so') - - app = cppyy.gbl.Bench02RootApp() - TCanvas = cppyy.gbl.TCanvas - TFile = cppyy.gbl.TFile - TProfile = cppyy.gbl.TProfile - TNtuple = cppyy.gbl.TNtuple - TH1F = cppyy.gbl.TH1F - TH2F = cppyy.gbl.TH2F - TRandom = cppyy.gbl.TRandom -except ImportError: - from ROOT import TCanvas, TFile, TProfile, TNtuple, TH1F, TH2F, TRandom - import random - -import math - -#gROOT = cppyy.gbl.gROOT -#gBenchmark = cppyy.gbl.gBenchmark -#gRandom = cppyy.gbl.gRandom -#gSystem = cppyy.gbl.gSystem - -#gROOT.Reset() - -# Create a new canvas, and customize it. -#c1 = TCanvas( 'c1', 'Dynamic Filling Example', 200, 10, 700, 500 ) -#c1.SetFillColor( 42 ) -#c1.GetFrame().SetFillColor( 21 ) -#c1.GetFrame().SetBorderSize( 6 ) -#c1.GetFrame().SetBorderMode( -1 ) - -# Create a new ROOT binary machine independent file. -# Note that this file may contain any kind of ROOT objects, histograms, -# pictures, graphics objects, detector geometries, tracks, events, etc.. -# This file is now becoming the current directory. 
- -#hfile = gROOT.FindObject( 'hsimple.root' ) -#if hfile: -# hfile.Close() -#hfile = TFile( 'hsimple.root', 'RECREATE', 'Demo ROOT file with histograms' ) - -# Create some histograms, a profile histogram and an ntuple -hpx = TH1F('hpx', 'This is the px distribution', 100, -4, 4) -hpx.Print() -#hpxpy = TH2F( 'hpxpy', 'py vs px', 40, -4, 4, 40, -4, 4 ) -#hprof = TProfile( 'hprof', 'Profile of pz versus px', 100, -4, 4, 0, 20 ) -#ntuple = TNtuple( 'ntuple', 'Demo ntuple', 'px:py:pz:random:i' ) - -# Set canvas/frame attributes. -#hpx.SetFillColor( 48 ) - -#gBenchmark.Start( 'hsimple' ) - -# Initialize random number generator. -#gRandom.SetSeed() -#rannor, rndm = gRandom.Rannor, gRandom.Rndm - -random = TRandom() -random.SetSeed(0) - -# Fill histograms randomly. -#px, py = Double(), Double() -kUPDATE = 1000 -for i in xrange(2500000): - # Generate random values. -# px, py = random.gauss(0, 1), random.gauss(0, 1) - px, py = random.Gaus(0, 1), random.Gaus(0, 1) -# pt = (px*px + py*py)**0.5 - pt = math.sqrt(px*px + py*py) -# pt = (px*px + py*py) -# random = rndm(1) - - # Fill histograms. - hpx.Fill(pt) -# hpxpyFill( px, py ) -# hprofFill( px, pz ) -# ntupleFill( px, py, pz, random, i ) - - # Update display every kUPDATE events. -# if i and i%kUPDATE == 0: -# if i == kUPDATE: -# hpx.Draw() - -# c1.Modified() -# c1.Update() - -# if gSystem.ProcessEvents(): # allow user interrupt -# break - -#gBenchmark.Show( 'hsimple' ) - -hpx.Print() - -# Save all objects in this file. -#hpx.SetFillColor( 0 ) -#hfile.Write() -#hfile.Close() -#hpx.SetFillColor( 48 ) -#c1.Modified() -#c1.Update() -#c1.Draw() - -# Note that the file is automatically closed when application terminates -# or when the file destructor is called. 
diff --git a/pypy/module/_cppyy/test/bench1.cxx b/pypy/module/_cppyy/test/bench1.cxx deleted file mode 100644 --- a/pypy/module/_cppyy/test/bench1.cxx +++ /dev/null @@ -1,39 +0,0 @@ -#include -#include -#include -#include - -#include "example01.h" - -static const int NNN = 10000000; - - -int cpp_loop_offset() { - int i = 0; - for ( ; i < NNN*10; ++i) - ; - return i; -} - -int cpp_bench1() { - int i = 0; - example01 e; - for ( ; i < NNN*10; ++i) - e.addDataToInt(i); - return i; -} - - -int main() { - - clock_t t1 = clock(); - cpp_loop_offset(); - clock_t t2 = clock(); - cpp_bench1(); - clock_t t3 = clock(); - - std::cout << std::setprecision(8) - << ((t3-t2) - (t2-t1))/((double)CLOCKS_PER_SEC*10.) << std::endl; - - return 0; -} diff --git a/pypy/module/_cppyy/test/bench1.py b/pypy/module/_cppyy/test/bench1.py deleted file mode 100644 --- a/pypy/module/_cppyy/test/bench1.py +++ /dev/null @@ -1,244 +0,0 @@ -import commands, os, sys, time, math - -from math import atan -NNN = 10000000 - - -def run_bench(bench): - global t_loop_offset, NNN - - t1 = time.time() - bench(NNN) - t2 = time.time() - - t_bench = (t2-t1) - return bench.scale*t_bench-t_loop_offset - -def print_bench(name, t_bench): - global t_cppref - print ':::: %s cost: %#6.3fs (%#4.1fx)' % (name, t_bench, float(t_bench)/t_cppref) - -def python_loop_offset(): - for i in xrange(NNN): - i - return i - -class CPythonBench1(object): - scale = 1 - def __init__(self): - import ROOT - ROOT.gROOT.SetBatch(1) - ROOT.SetSignalPolicy(ROOT.kSignalFast) - import cppyy - self.lib = cppyy.gbl.gSystem.Load("./example01Dict.so") - - self.cls = cppyy.gbl.example01 - self.inst = self.cls(0) - - def __call__(self, repeat): - # TODO: check linearity of actual scaling - instance = self.inst - niter = repeat/self.scale - self.cls.addDataToInt._threaded = True - for i in xrange(niter): - instance.addDataToInt(i) - return i - -class CPythonBench1_Swig(object): - scale = 1 - def __init__(self): - import example - - self.cls = 
example.example01 - self.inst = self.cls(0) - - def __call__(self, repeat): - # TODO: check linearity of actual scaling - instance = self.inst - niter = repeat/self.scale - for i in xrange(niter): - instance.addDataToInt(i) - return i - - -class PureBench1(object): - scale = 1 - def __init__(self): - class example01(object): - def __init__(self, somedata): - self.m_somedata = somedata - def addDataToInt(self, a): - return self.m_somedata + int(atan(a)) - - self.cls = example01 - self.inst = self.cls(0) - - def __call__(self, repeat): - # TODO: check linearity of actual scaling - instance = self.inst - niter = repeat/self.scale - for i in xrange(niter): - instance.addDataToInt(i) - return i - - -class CppyyInterpBench1(object): - title = "cppyy interp" - scale = 1 - def __init__(self): - import cppyy - self.lib = cppyy.load_reflection_info("./example01Dict.so") - - self.cls = cppyy._scope_byname("example01") - self.inst = self.cls.get_overload(self.cls.type_name).call(None, 0) - - def __call__(self, repeat): - addDataToInt = self.cls.get_overload("addDataToInt") - instance = self.inst - for i in xrange(repeat): - addDataToInt.call(instance, i) - return i - -class CppyyInterpBench2(CppyyInterpBench1): - title = "... overload" - def __call__(self, repeat): - addDataToInt = self.cls.get_overload("overloadedAddDataToInt") - instance = self.inst - for i in xrange(repeat): - addDataToInt.call(instance, i) - return i - -class CppyyInterpBench3(CppyyInterpBench1): - title = "... 
constref" - def __call__(self, repeat): - addDataToInt = self.cls.get_overload("addDataToIntConstRef") - instance = self.inst - for i in xrange(repeat): - addDataToInt.call(instance, i) - return i - -class CppyyPythonBench1(object): - title = "cppyy python" - scale = 1 - def __init__(self): - import cppyy - self.lib = cppyy.load_reflection_info("./example01Dict.so") - - self.cls = cppyy.gbl.example01 - self.inst = self.cls(0) - - def __call__(self, repeat): - instance = self.inst - for i in xrange(repeat): - instance.addDataToInt(i) - return i - -class CppyyPythonBench2(CppyyPythonBench1): - title = "... objbyval" - def __call__(self, repeat): - import cppyy - pl = cppyy.gbl.payload(3.14) - - instance = self.inst - for i in xrange(repeat): - instance.copyCyclePayload(pl) - return i - -class CppyyPythonBench3(CppyyPythonBench1): - title = "... objbyptr" - def __call__(self, repeat): - import cppyy - pl = cppyy.gbl.payload(3.14) - - instance = self.inst - for i in xrange(repeat): - instance.cyclePayload(pl) - return i - - -if __name__ == '__main__': - python_loop_offset(); - - # time python loop offset - t1 = time.time() - python_loop_offset() - t2 = time.time() - t_loop_offset = t2-t1 - - # special cases for CPython - if '-swig' in sys.argv: - # runs SWIG - cpython_bench1 = CPythonBench1_Swig() - elif '-pure' in sys.argv: - # runs pure python - cpython_bench1 = PureBench1() - elif not 'cppyy' in sys.builtin_module_names: - # runs ROOT/cppyy.py - cpython_bench1 = CPythonBench1() - try: - print run_bench(cpython_bench1) - sys.exit(0) - except NameError: - pass - - # get C++ reference point - if not os.path.exists("bench1.exe") or\ - os.stat("bench1.exe").st_mtime < os.stat("bench1.cxx").st_mtime: - print "rebuilding bench1.exe ... " - # the following is debatable, as pypy-c uses direct function - # pointers, whereas that is only true for virtual functions in - # the case of C++ (by default, anyway, it need not) - # yes, shared library use is what's going on ... 
-# os.system( "g++ -O2 bench1.cxx example01.cxx -o bench1.exe" ) - os.system( "g++ -O2 bench1.cxx -L. -lexample01Dict -o bench1.exe" ) - stat, cppref = commands.getstatusoutput("./bench1.exe") - t_cppref = float(cppref) - - # create object - benches = [ - CppyyInterpBench1(), CppyyInterpBench2(), CppyyInterpBench3(), - CppyyPythonBench1(), CppyyPythonBench2(), CppyyPythonBench3() ] - - # warm-up - print "warming up ... " - for bench in benches: - bench(2000) - - # to allow some consistency checking - print "C++ reference uses %.3fs" % t_cppref - - # test runs ... - for bench in benches: - print_bench(bench.title, run_bench(bench)) - - stat, t_cpython1 = commands.getstatusoutput("/home/wlav/aditi/pypy/bin/v5/pypy-c bench1.py - -pure") - if stat: - print 'CPython pure bench1 failed:' - os.write(sys.stdout.fileno(), t_cpython1) - print - exit(stat) - print_bench("pypy-c pure ", float(t_cpython1)) - - stat, t_cpython1 = commands.getstatusoutput("python bench1.py - -pure") - if stat: - print 'CPython pure bench1 failed:' - os.write(sys.stdout.fileno(), t_cpython1) - print - exit(stat) - print_bench("CPython pure", float(t_cpython1)) - - stat, t_cpython1 = commands.getstatusoutput("python bench1.py - -b") - if stat: - print 'CPython bench1 failed:' - os.write(sys.stdout.fileno(), t_cpython1) - print - exit(stat) - print_bench("CPython ", float(t_cpython1)) - - #stat, t_cpython1 = commands.getstatusoutput("python bench1.py - -swig") - #if stat: - # print 'SWIG bench1 failed:' - # os.write(sys.stdout.fileno(), t_cpython1) - # print - # exit(stat) - #print_bench("SWIG ", float(t_cpython1))