[pypy-commit] pypy improve-docs: hg merge default
Manuel Jacob
noreply at buildbot.pypy.org
Sat Jul 26 14:49:54 CEST 2014
Author: Manuel Jacob
Branch: improve-docs
Changeset: r72503:9ca2657fe663
Date: 2014-07-26 14:49 +0200
http://bitbucket.org/pypy/pypy/changeset/9ca2657fe663/
Log: hg merge default
diff too long, truncating to 2000 out of 33446 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -10,3 +10,7 @@
20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0
0000000000000000000000000000000000000000 release-2.3.0
394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3
+32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1
+32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1
+32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1
+0000000000000000000000000000000000000000 release-2.3.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -128,6 +128,7 @@
Stian Andreassen
Laurence Tratt
Wanja Saatkamp
+ Ivan Sichmann Freitas
Gerald Klix
Mike Blume
Oscar Nierstrasz
@@ -212,7 +213,9 @@
Alejandro J. Cura
Jacob Oscarson
Travis Francis Athougies
+ Ryan Gonzalez
Kristjan Valur Jonsson
+ Sebastian Pawluś
Neil Blakey-Milner
anatoly techtonik
Lutz Paelike
@@ -245,6 +248,7 @@
Michael Hudson-Doyle
Anders Sigfridsson
Yasir Suhail
+ rafalgalczynski at gmail.com
Floris Bruynooghe
Laurens Van Houtven
Akira Li
@@ -274,6 +278,8 @@
Zooko Wilcox-O'Hearn
Tomer Chachamu
Christopher Groskopf
+ Asmo Soinio
+ Stefan Marr
jiaaro
opassembler.py
Antony Lee
diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py
--- a/lib-python/2.7/ctypes/__init__.py
+++ b/lib-python/2.7/ctypes/__init__.py
@@ -389,12 +389,13 @@
func.__name__ = name_or_ordinal
return func
-class PyDLL(CDLL):
- """This class represents the Python library itself. It allows to
- access Python API functions. The GIL is not released, and
- Python exceptions are handled correctly.
- """
- _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
+# Not in PyPy
+#class PyDLL(CDLL):
+# """This class represents the Python library itself. It allows to
+# access Python API functions. The GIL is not released, and
+# Python exceptions are handled correctly.
+# """
+# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
if _os.name in ("nt", "ce"):
@@ -447,15 +448,8 @@
return self._dlltype(name)
cdll = LibraryLoader(CDLL)
-pydll = LibraryLoader(PyDLL)
-
-if _os.name in ("nt", "ce"):
- pythonapi = PyDLL("python dll", None, _sys.dllhandle)
-elif _sys.platform == "cygwin":
- pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2])
-else:
- pythonapi = PyDLL(None)
-
+# not on PyPy
+#pydll = LibraryLoader(PyDLL)
if _os.name in ("nt", "ce"):
windll = LibraryLoader(WinDLL)
diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py
--- a/lib-python/2.7/ctypes/test/test_values.py
+++ b/lib-python/2.7/ctypes/test/test_values.py
@@ -4,6 +4,7 @@
import unittest
from ctypes import *
+from ctypes.test import xfail
import _ctypes_test
@@ -23,7 +24,8 @@
class Win_ValuesTestCase(unittest.TestCase):
"""This test only works when python itself is a dll/shared library"""
-
+
+ @xfail
def test_optimizeflag(self):
# This test accesses the Py_OptimizeFlag integer, which is
# exported by the Python dll.
@@ -40,6 +42,7 @@
else:
self.assertEqual(opt, 2)
+ @xfail
def test_frozentable(self):
# Python exports a PyImport_FrozenModules symbol. This is a
# pointer to an array of struct _frozen entries. The end of the
@@ -75,6 +78,7 @@
from ctypes import _pointer_type_cache
del _pointer_type_cache[struct_frozen]
+ @xfail
def test_undefined(self):
self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol")
diff --git a/lib-python/2.7/imputil.py b/lib-python/2.7/imputil.py
--- a/lib-python/2.7/imputil.py
+++ b/lib-python/2.7/imputil.py
@@ -422,7 +422,8 @@
saved back to the filesystem for future imports. The source file's
modification timestamp must be provided as a Long value.
"""
- codestring = open(pathname, 'rU').read()
+ with open(pathname, 'rU') as fp:
+ codestring = fp.read()
if codestring and codestring[-1] != '\n':
codestring = codestring + '\n'
code = __builtin__.compile(codestring, pathname, 'exec')
@@ -603,8 +604,8 @@
self.desc = desc
def import_file(self, filename, finfo, fqname):
- fp = open(filename, self.desc[1])
- module = imp.load_module(fqname, fp, filename, self.desc)
+ with open(filename, self.desc[1]) as fp:
+ module = imp.load_module(fqname, fp, filename, self.desc)
module.__file__ = filename
return 0, module, { }
diff --git a/lib-python/2.7/modulefinder.py b/lib-python/2.7/modulefinder.py
--- a/lib-python/2.7/modulefinder.py
+++ b/lib-python/2.7/modulefinder.py
@@ -109,16 +109,16 @@
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
- fp = open(pathname, READ_MODE)
- stuff = ("", "r", imp.PY_SOURCE)
- self.load_module('__main__', fp, pathname, stuff)
+ with open(pathname, READ_MODE) as fp:
+ stuff = ("", "r", imp.PY_SOURCE)
+ self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
- fp = open(pathname, READ_MODE)
- stuff = (ext, "r", imp.PY_SOURCE)
- self.load_module(name, fp, pathname, stuff)
+ with open(pathname, READ_MODE) as fp:
+ stuff = (ext, "r", imp.PY_SOURCE)
+ self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
self.msg(3, "import_hook", name, caller, fromlist, level)
@@ -461,6 +461,8 @@
fp, buf, stuff = self.find_module("__init__", m.__path__)
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
+ if fp:
+ fp.close()
return m
def add_module(self, fqname):
diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py
--- a/lib-python/2.7/test/test_argparse.py
+++ b/lib-python/2.7/test/test_argparse.py
@@ -48,6 +48,9 @@
def tearDown(self):
os.chdir(self.old_dir)
+ import gc
+ # Force a collection which should close FileType() options
+ gc.collect()
for root, dirs, files in os.walk(self.temp_dir, topdown=False):
for name in files:
os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE)
diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py
--- a/lib-python/2.7/test/test_gdbm.py
+++ b/lib-python/2.7/test/test_gdbm.py
@@ -98,6 +98,17 @@
self.assertTrue(key in self.g)
self.assertTrue(self.g.has_key(key))
+ def test_unicode_key(self):
+ key = u'ab'
+ value = u'cd'
+ self.g = gdbm.open(filename, 'cf')
+ self.g[key] = value
+ self.g.close()
+ self.g = gdbm.open(filename, 'r')
+ self.assertEquals(self.g[key], value)
+ self.assertTrue(key in self.g)
+ self.assertTrue(self.g.has_key(key))
+
def test_main():
run_unittest(TestGdbm)
diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py
--- a/lib-python/2.7/timeit.py
+++ b/lib-python/2.7/timeit.py
@@ -55,11 +55,6 @@
import gc
import sys
import time
-try:
- import itertools
-except ImportError:
- # Must be an older Python version (see timeit() below)
- itertools = None
__all__ = ["Timer"]
@@ -81,7 +76,8 @@
def inner(_it, _timer):
%(setup)s
_t0 = _timer()
- for _i in _it:
+ while _it > 0:
+ _it -= 1
%(stmt)s
_t1 = _timer()
return _t1 - _t0
@@ -96,7 +92,8 @@
def inner(_it, _timer, _func=func):
setup()
_t0 = _timer()
- for _i in _it:
+ while _it > 0:
+ _it -= 1
_func()
_t1 = _timer()
return _t1 - _t0
@@ -133,9 +130,19 @@
else:
raise ValueError("setup is neither a string nor callable")
self.src = src # Save for traceback display
- code = compile(src, dummy_src_name, "exec")
- exec code in globals(), ns
- self.inner = ns["inner"]
+ def make_inner():
+ # PyPy tweak: recompile the source code each time before
+ # calling inner(). There are situations like Issue #1776
+ # where PyPy tries to reuse the JIT code from before,
+ # but that's not going to work: the first thing the
+ # function does is the "-s" statement, which may declare
+ # new classes (here a namedtuple). We end up with
+ # bridges from the inner loop; more and more of them
+ # every time we call inner().
+ code = compile(src, dummy_src_name, "exec")
+ exec code in globals(), ns
+ return ns["inner"]
+ self.make_inner = make_inner
elif hasattr(stmt, '__call__'):
self.src = None
if isinstance(setup, basestring):
@@ -144,7 +151,8 @@
exec _setup in globals(), ns
elif not hasattr(setup, '__call__'):
raise ValueError("setup is neither a string nor callable")
- self.inner = _template_func(setup, stmt)
+ inner = _template_func(setup, stmt)
+ self.make_inner = lambda: inner
else:
raise ValueError("stmt is neither a string nor callable")
@@ -185,15 +193,12 @@
to one million. The main statement, the setup statement and
the timer function to be used are passed to the constructor.
"""
- if itertools:
- it = itertools.repeat(None, number)
- else:
- it = [None] * number
+ inner = self.make_inner()
gcold = gc.isenabled()
if '__pypy__' not in sys.builtin_module_names:
gc.disable() # only do that on CPython
try:
- timing = self.inner(it, self.timer)
+ timing = inner(number, self.timer)
finally:
if gcold:
gc.enable()
diff --git a/lib-python/2.7/xml/sax/saxutils.py b/lib-python/2.7/xml/sax/saxutils.py
--- a/lib-python/2.7/xml/sax/saxutils.py
+++ b/lib-python/2.7/xml/sax/saxutils.py
@@ -98,13 +98,14 @@
except AttributeError:
pass
# wrap a binary writer with TextIOWrapper
- class UnbufferedTextIOWrapper(io.TextIOWrapper):
- def write(self, s):
- super(UnbufferedTextIOWrapper, self).write(s)
- self.flush()
- return UnbufferedTextIOWrapper(buffer, encoding=encoding,
+ return _UnbufferedTextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n')
+# PyPy: moved this class outside the function above
+class _UnbufferedTextIOWrapper(io.TextIOWrapper):
+ def write(self, s):
+ super(_UnbufferedTextIOWrapper, self).write(s)
+ self.flush()
class XMLGenerator(handler.ContentHandler):
diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py
--- a/lib_pypy/_ctypes/function.py
+++ b/lib_pypy/_ctypes/function.py
@@ -219,6 +219,8 @@
if restype is None:
import ctypes
restype = ctypes.c_int
+ if self._argtypes_ is None:
+ self._argtypes_ = []
self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype)
self._check_argtypes_for_fastpath()
return
diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py
--- a/lib_pypy/_curses.py
+++ b/lib_pypy/_curses.py
@@ -309,11 +309,9 @@
#endif
int _m_ispad(WINDOW *win) {
-#if defined WINDOW_HAS_FLAGS
+ // <curses.h> may not have _flags (and possibly _ISPAD),
+ // but for now let's assume that <ncurses.h> always has it
return (win->_flags & _ISPAD);
-#else
- return 0;
-#endif
}
void _m_getsyx(int *yx) {
diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py
--- a/lib_pypy/_pypy_testcapi.py
+++ b/lib_pypy/_pypy_testcapi.py
@@ -13,7 +13,15 @@
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
- output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2)
+ try:
+ username = os.environ['USER'] #linux, et al
+ except KeyError:
+ try:
+ username = os.environ['USERNAME'] #windows
+ except KeyError:
+ username = os.getuid()
+ output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s_%s%s' % (
+ username, k1, k2)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
return output_dir
diff --git a/lib_pypy/_tkinter/license.terms b/lib_pypy/_tkinter/license.terms
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_tkinter/license.terms
@@ -0,0 +1,39 @@
+This software is copyrighted by the Regents of the University of
+California, Sun Microsystems, Inc., and other parties. The following
+terms apply to all files associated with the software unless explicitly
+disclaimed in individual files.
+
+The authors hereby grant permission to use, copy, modify, distribute,
+and license this software and its documentation for any purpose, provided
+that existing copyright notices are retained in all copies and that this
+notice is included verbatim in any distributions. No written agreement,
+license, or royalty fee is required for any of the authorized uses.
+Modifications to this software may be copyrighted by their authors
+and need not follow the licensing terms described here, provided that
+the new terms are clearly indicated on the first page of each file where
+they apply.
+
+IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
+FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
+DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
+NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+MODIFICATIONS.
+
+GOVERNMENT USE: If you are acquiring this software on behalf of the
+U.S. government, the Government shall have only "Restricted Rights"
+in the software and related documentation as defined in the Federal
+Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+are acquiring the software on behalf of the Department of Defense, the
+software shall be classified as "Commercial Computer Software" and the
+Government shall have only "Restricted Rights" as defined in Clause
+252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
+authors grant the U.S. Government and others acting in its behalf
+permission to use and distribute the software in accordance with the
+terms specified in this license.
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,5 +4,5 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "0.8.2"
-__version_info__ = (0, 8, 2)
+__version__ = "0.8.6"
+__version_info__ = (0, 8, 6)
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -55,8 +55,7 @@
# _cffi_backend.so compiled.
import _cffi_backend as backend
from . import __version__
- assert (backend.__version__ == __version__ or
- backend.__version__ == __version__[:3])
+ assert backend.__version__ == __version__
# (If you insist you can also try to pass the option
# 'backend=backend_ctypes.CTypesBackend()', but don't
# rely on it! It's probably not going to work well.)
@@ -443,6 +442,10 @@
for enumname, enumval in zip(tp.enumerators, tp.enumvalues):
if enumname not in library.__dict__:
library.__dict__[enumname] = enumval
+ for key, val in ffi._parser._int_constants.items():
+ if key not in library.__dict__:
+ library.__dict__[key] = val
+
copied_enums.append(True)
if name in library.__dict__:
return
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -24,6 +24,7 @@
_r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]")
_r_words = re.compile(r"\w+|\S")
_parser_cache = None
+_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE)
def _get_parser():
global _parser_cache
@@ -99,6 +100,7 @@
self._structnode2type = weakref.WeakKeyDictionary()
self._override = False
self._packed = False
+ self._int_constants = {}
def _parse(self, csource):
csource, macros = _preprocess(csource)
@@ -128,9 +130,10 @@
finally:
if lock is not None:
lock.release()
- return ast, macros
+ # csource will be used to find buggy source text
+ return ast, macros, csource
- def convert_pycparser_error(self, e, csource):
+ def _convert_pycparser_error(self, e, csource):
# xxx look for ":NUM:" at the start of str(e) and try to interpret
# it as a line number
line = None
@@ -142,6 +145,12 @@
csourcelines = csource.splitlines()
if 1 <= linenum <= len(csourcelines):
line = csourcelines[linenum-1]
+ return line
+
+ def convert_pycparser_error(self, e, csource):
+ line = self._convert_pycparser_error(e, csource)
+
+ msg = str(e)
if line:
msg = 'cannot parse "%s"\n%s' % (line.strip(), msg)
else:
@@ -160,14 +169,9 @@
self._packed = prev_packed
def _internal_parse(self, csource):
- ast, macros = self._parse(csource)
+ ast, macros, csource = self._parse(csource)
# add the macros
- for key, value in macros.items():
- value = value.strip()
- if value != '...':
- raise api.CDefError('only supports the syntax "#define '
- '%s ..." for now (literally)' % key)
- self._declare('macro ' + key, value)
+ self._process_macros(macros)
# find the first "__dotdotdot__" and use that as a separator
# between the repeated typedefs and the real csource
iterator = iter(ast.ext)
@@ -175,27 +179,61 @@
if decl.name == '__dotdotdot__':
break
#
- for decl in iterator:
- if isinstance(decl, pycparser.c_ast.Decl):
- self._parse_decl(decl)
- elif isinstance(decl, pycparser.c_ast.Typedef):
- if not decl.name:
- raise api.CDefError("typedef does not declare any name",
- decl)
- if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType)
- and decl.type.type.names == ['__dotdotdot__']):
- realtype = model.unknown_type(decl.name)
- elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and
- isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and
- isinstance(decl.type.type.type,
- pycparser.c_ast.IdentifierType) and
- decl.type.type.type.names == ['__dotdotdot__']):
- realtype = model.unknown_ptr_type(decl.name)
+ try:
+ for decl in iterator:
+ if isinstance(decl, pycparser.c_ast.Decl):
+ self._parse_decl(decl)
+ elif isinstance(decl, pycparser.c_ast.Typedef):
+ if not decl.name:
+ raise api.CDefError("typedef does not declare any name",
+ decl)
+ if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType)
+ and decl.type.type.names == ['__dotdotdot__']):
+ realtype = model.unknown_type(decl.name)
+ elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and
+ isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and
+ isinstance(decl.type.type.type,
+ pycparser.c_ast.IdentifierType) and
+ decl.type.type.type.names == ['__dotdotdot__']):
+ realtype = model.unknown_ptr_type(decl.name)
+ else:
+ realtype = self._get_type(decl.type, name=decl.name)
+ self._declare('typedef ' + decl.name, realtype)
else:
- realtype = self._get_type(decl.type, name=decl.name)
- self._declare('typedef ' + decl.name, realtype)
+ raise api.CDefError("unrecognized construct", decl)
+ except api.FFIError as e:
+ msg = self._convert_pycparser_error(e, csource)
+ if msg:
+ e.args = (e.args[0] + "\n *** Err: %s" % msg,)
+ raise
+
+ def _add_constants(self, key, val):
+ if key in self._int_constants:
+ raise api.FFIError(
+ "multiple declarations of constant: %s" % (key,))
+ self._int_constants[key] = val
+
+ def _process_macros(self, macros):
+ for key, value in macros.items():
+ value = value.strip()
+ match = _r_int_literal.search(value)
+ if match is not None:
+ int_str = match.group(0).lower().rstrip("ul")
+
+ # "010" is not valid oct in py3
+ if (int_str.startswith("0") and
+ int_str != "0" and
+ not int_str.startswith("0x")):
+ int_str = "0o" + int_str[1:]
+
+ pyvalue = int(int_str, 0)
+ self._add_constants(key, pyvalue)
+ elif value == '...':
+ self._declare('macro ' + key, value)
else:
- raise api.CDefError("unrecognized construct", decl)
+ raise api.CDefError('only supports the syntax "#define '
+ '%s ..." (literally) or "#define '
+ '%s 0x1FF" for now' % (key, key))
def _parse_decl(self, decl):
node = decl.type
@@ -227,7 +265,7 @@
self._declare('variable ' + decl.name, tp)
def parse_type(self, cdecl):
- ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)
+ ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2]
assert not macros
exprnode = ast.ext[-1].type.args.params[0]
if isinstance(exprnode, pycparser.c_ast.ID):
@@ -306,7 +344,8 @@
if ident == 'void':
return model.void_type
if ident == '__dotdotdot__':
- raise api.FFIError('bad usage of "..."')
+ raise api.FFIError(':%d: bad usage of "..."' %
+ typenode.coord.line)
return resolve_common_type(ident)
#
if isinstance(type, pycparser.c_ast.Struct):
@@ -333,7 +372,8 @@
return self._get_struct_union_enum_type('union', typenode, name,
nested=True)
#
- raise api.FFIError("bad or unsupported type declaration")
+ raise api.FFIError(":%d: bad or unsupported type declaration" %
+ typenode.coord.line)
def _parse_function_type(self, typenode, funcname=None):
params = list(getattr(typenode.args, 'params', []))
@@ -499,6 +539,10 @@
if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and
exprnode.op == '-'):
return -self._parse_constant(exprnode.expr)
+ # load previously defined int constant
+ if (isinstance(exprnode, pycparser.c_ast.ID) and
+ exprnode.name in self._int_constants):
+ return self._int_constants[exprnode.name]
#
if partial_length_ok:
if (isinstance(exprnode, pycparser.c_ast.ID) and
@@ -506,8 +550,8 @@
self._partial_length = True
return '...'
#
- raise api.FFIError("unsupported expression: expected a "
- "simple numeric constant")
+ raise api.FFIError(":%d: unsupported expression: expected a "
+ "simple numeric constant" % exprnode.coord.line)
def _build_enum_type(self, explicit_name, decls):
if decls is not None:
@@ -522,6 +566,7 @@
if enum.value is not None:
nextenumvalue = self._parse_constant(enum.value)
enumvalues.append(nextenumvalue)
+ self._add_constants(enum.name, nextenumvalue)
nextenumvalue += 1
enumvalues = tuple(enumvalues)
tp = model.EnumType(explicit_name, enumerators, enumvalues)
@@ -535,3 +580,5 @@
kind = name.split(' ', 1)[0]
if kind in ('typedef', 'struct', 'union', 'enum'):
self._declare(name, tp)
+ for k, v in other._int_constants.items():
+ self._add_constants(k, v)
diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py
--- a/lib_pypy/cffi/ffiplatform.py
+++ b/lib_pypy/cffi/ffiplatform.py
@@ -38,6 +38,7 @@
import distutils.errors
#
dist = Distribution({'ext_modules': [ext]})
+ dist.parse_config_files()
options = dist.get_option_dict('build_ext')
options['force'] = ('ffiplatform', True)
options['build_lib'] = ('ffiplatform', tmpdir)
diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py
--- a/lib_pypy/cffi/vengine_cpy.py
+++ b/lib_pypy/cffi/vengine_cpy.py
@@ -89,43 +89,54 @@
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
- prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},')
- prnt(' {NULL, NULL} /* Sentinel */')
+ prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
+ prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
- if sys.version_info >= (3,):
- prnt('static struct PyModuleDef _cffi_module_def = {')
- prnt(' PyModuleDef_HEAD_INIT,')
- prnt(' "%s",' % modname)
- prnt(' NULL,')
- prnt(' -1,')
- prnt(' _cffi_methods,')
- prnt(' NULL, NULL, NULL, NULL')
- prnt('};')
- prnt()
- initname = 'PyInit_%s' % modname
- createmod = 'PyModule_Create(&_cffi_module_def)'
- errorcase = 'return NULL'
- finalreturn = 'return lib'
- else:
- initname = 'init%s' % modname
- createmod = 'Py_InitModule("%s", _cffi_methods)' % modname
- errorcase = 'return'
- finalreturn = 'return'
+ constants = self._chained_list_constants[False]
+ prnt('#if PY_MAJOR_VERSION >= 3')
+ prnt()
+ prnt('static struct PyModuleDef _cffi_module_def = {')
+ prnt(' PyModuleDef_HEAD_INIT,')
+ prnt(' "%s",' % modname)
+ prnt(' NULL,')
+ prnt(' -1,')
+ prnt(' _cffi_methods,')
+ prnt(' NULL, NULL, NULL, NULL')
+ prnt('};')
+ prnt()
prnt('PyMODINIT_FUNC')
- prnt('%s(void)' % initname)
+ prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
- prnt(' lib = %s;' % createmod)
- prnt(' if (lib == NULL || %s < 0)' % (
- self._chained_list_constants[False],))
- prnt(' %s;' % errorcase)
- prnt(' _cffi_init();')
- prnt(' %s;' % finalreturn)
+ prnt(' lib = PyModule_Create(&_cffi_module_def);')
+ prnt(' if (lib == NULL)')
+ prnt(' return NULL;')
+ prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
+ prnt(' Py_DECREF(lib);')
+ prnt(' return NULL;')
+ prnt(' }')
+ prnt(' return lib;')
prnt('}')
+ prnt()
+ prnt('#else')
+ prnt()
+ prnt('PyMODINIT_FUNC')
+ prnt('init%s(void)' % modname)
+ prnt('{')
+ prnt(' PyObject *lib;')
+ prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
+ prnt(' if (lib == NULL)')
+ prnt(' return;')
+ prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
+ prnt(' return;')
+ prnt(' return;')
+ prnt('}')
+ prnt()
+ prnt('#endif')
def load_library(self):
# XXX review all usages of 'self' here!
@@ -394,7 +405,7 @@
meth = 'METH_O'
else:
meth = 'METH_VARARGS'
- self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth))
+ self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop
@@ -481,8 +492,8 @@
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
- self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname,
- layoutfuncname))
+ self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
+ layoutfuncname))
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
@@ -589,13 +600,7 @@
'variable type'),))
assert delayed
else:
- prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name))
- prnt(' o = PyInt_FromLong((long)(%s));' % (name,))
- prnt(' else if ((%s) <= 0)' % (name,))
- prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,))
- prnt(' else')
- prnt(' o = PyLong_FromUnsignedLongLong('
- '(unsigned long long)(%s));' % (name,))
+ prnt(' o = _cffi_from_c_int_const(%s);' % name)
prnt(' if (o == NULL)')
prnt(' return -1;')
if size_too:
@@ -632,13 +637,18 @@
# ----------
# enums
+ def _enum_funcname(self, prefix, name):
+ # "$enum_$1" => "___D_enum____D_1"
+ name = name.replace('$', '___D_')
+ return '_cffi_e_%s_%s' % (prefix, name)
+
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator, delayed=False)
return
#
- funcname = '_cffi_e_%s_%s' % (prefix, name)
+ funcname = self._enum_funcname(prefix, name)
prnt = self._prnt
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
@@ -760,17 +770,30 @@
#include <Python.h>
#include <stddef.h>
-#ifdef MS_WIN32
-#include <malloc.h> /* for alloca() */
-typedef __int8 int8_t;
-typedef __int16 int16_t;
-typedef __int32 int32_t;
-typedef __int64 int64_t;
-typedef unsigned __int8 uint8_t;
-typedef unsigned __int16 uint16_t;
-typedef unsigned __int32 uint32_t;
-typedef unsigned __int64 uint64_t;
-typedef unsigned char _Bool;
+/* this block of #ifs should be kept exactly identical between
+ c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
+#if defined(_MSC_VER)
+# include <malloc.h> /* for alloca() */
+# if _MSC_VER < 1600 /* MSVC < 2010 */
+ typedef __int8 int8_t;
+ typedef __int16 int16_t;
+ typedef __int32 int32_t;
+ typedef __int64 int64_t;
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+ typedef unsigned __int64 uint64_t;
+# else
+# include <stdint.h>
+# endif
+# if _MSC_VER < 1800 /* MSVC < 2013 */
+ typedef unsigned char _Bool;
+# endif
+#else
+# include <stdint.h>
+# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
+# include <alloca.h>
+# endif
#endif
#if PY_MAJOR_VERSION < 3
@@ -795,6 +818,15 @@
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
+#define _cffi_from_c_int_const(x) \
+ (((x) > 0) ? \
+ ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
+ PyInt_FromLong((long)(x)) : \
+ PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
+ ((long long)(x) >= (long long)LONG_MIN) ? \
+ PyInt_FromLong((long)(x)) : \
+ PyLong_FromLongLong((long long)(x)))
+
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \
@@ -804,14 +836,14 @@
PyLong_FromLongLong(x)))
#define _cffi_to_c_int(o, type) \
- (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \
- : _cffi_to_c_i8(o)) : \
- sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \
- : _cffi_to_c_i16(o)) : \
- sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \
- : _cffi_to_c_i32(o)) : \
- sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \
- : _cffi_to_c_i64(o)) : \
+ (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
+ : (type)_cffi_to_c_i8(o)) : \
+ sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
+ : (type)_cffi_to_c_i16(o)) : \
+ sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
+ : (type)_cffi_to_c_i32(o)) : \
+ sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
+ : (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), 0))
#define _cffi_to_c_i8 \
@@ -885,25 +917,32 @@
return PyBool_FromLong(was_alive);
}
-static void _cffi_init(void)
+static int _cffi_init(void)
{
- PyObject *module = PyImport_ImportModule("_cffi_backend");
- PyObject *c_api_object;
+ PyObject *module, *c_api_object = NULL;
+ module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
- return;
+ goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
- return;
+ goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
- Py_DECREF(c_api_object);
PyErr_SetNone(PyExc_ImportError);
- return;
+ goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
+
+ Py_DECREF(module);
Py_DECREF(c_api_object);
+ return 0;
+
+ failure:
+ Py_XDECREF(module);
+ Py_XDECREF(c_api_object);
+ return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py
--- a/lib_pypy/cffi/vengine_gen.py
+++ b/lib_pypy/cffi/vengine_gen.py
@@ -249,10 +249,10 @@
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
self.export_symbols.append(layoutfuncname)
- prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,))
+ prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
- prnt(' static ssize_t nums[] = {')
+ prnt(' static intptr_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize in tp.enumfields():
@@ -276,7 +276,7 @@
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
- BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0]
+ BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0]
function = module.load_function(BFunc, layoutfuncname)
layout = []
num = 0
@@ -410,13 +410,18 @@
# ----------
# enums
+ def _enum_funcname(self, prefix, name):
+ # "$enum_$1" => "___D_enum____D_1"
+ name = name.replace('$', '___D_')
+ return '_cffi_e_%s_%s' % (prefix, name)
+
def _generate_gen_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_gen_const(True, enumerator)
return
#
- funcname = '_cffi_e_%s_%s' % (prefix, name)
+ funcname = self._enum_funcname(prefix, name)
self.export_symbols.append(funcname)
prnt = self._prnt
prnt('int %s(char *out_error)' % funcname)
@@ -430,14 +435,14 @@
enumerator, enumerator, enumvalue))
prnt(' char buf[64];')
prnt(' if ((%s) < 0)' % enumerator)
- prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator)
+ prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator)
prnt(' else')
- prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
+ prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' %
enumerator)
- prnt(' snprintf(out_error, 255,'
+ prnt(' sprintf(out_error,'
' "%s has the real value %s, not %s",')
prnt(' "%s", buf, "%d");' % (
- enumerator, enumvalue))
+ enumerator[:100], enumvalue))
prnt(' return -1;')
prnt(' }')
prnt(' return 0;')
@@ -453,7 +458,7 @@
else:
BType = self.ffi._typeof_locked("char[]")[0]
BFunc = self.ffi._typeof_locked("int(*)(char*)")[0]
- funcname = '_cffi_e_%s_%s' % (prefix, name)
+ funcname = self._enum_funcname(prefix, name)
function = module.load_function(BFunc, funcname)
p = self.ffi.new(BType, 256)
if function(p) < 0:
@@ -547,20 +552,29 @@
#include <errno.h>
#include <sys/types.h> /* XXX for ssize_t on some platforms */
-#ifdef _WIN32
-# include <Windows.h>
-# define snprintf _snprintf
-typedef __int8 int8_t;
-typedef __int16 int16_t;
-typedef __int32 int32_t;
-typedef __int64 int64_t;
-typedef unsigned __int8 uint8_t;
-typedef unsigned __int16 uint16_t;
-typedef unsigned __int32 uint32_t;
-typedef unsigned __int64 uint64_t;
-typedef SSIZE_T ssize_t;
-typedef unsigned char _Bool;
+/* this block of #ifs should be kept exactly identical between
+ c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
+#if defined(_MSC_VER)
+# include <malloc.h> /* for alloca() */
+# if _MSC_VER < 1600 /* MSVC < 2010 */
+ typedef __int8 int8_t;
+ typedef __int16 int16_t;
+ typedef __int32 int32_t;
+ typedef __int64 int64_t;
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+ typedef unsigned __int64 uint64_t;
+# else
+# include <stdint.h>
+# endif
+# if _MSC_VER < 1800 /* MSVC < 2013 */
+ typedef unsigned char _Bool;
+# endif
#else
-# include <stdint.h>
+# include <stdint.h>
+# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
+# include <alloca.h>
+# endif
#endif
'''
diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py
--- a/lib_pypy/gdbm.py
+++ b/lib_pypy/gdbm.py
@@ -50,6 +50,8 @@
pass
def _fromstr(key):
+ if isinstance(key, unicode):
+ key = key.encode("ascii")
if not isinstance(key, str):
raise TypeError("gdbm mappings have string indices only")
return {'dptr': ffi.new("char[]", key), 'dsize': len(key)}
@@ -71,8 +73,8 @@
def _raise_from_errno(self):
if ffi.errno:
- raise error(os.strerror(ffi.errno))
- raise error(lib.gdbm_strerror(lib.gdbm_errno))
+ raise error(ffi.errno, os.strerror(ffi.errno))
+ raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno))
def __len__(self):
if self.size < 0:
@@ -141,7 +143,7 @@
def _check_closed(self):
if not self.ll_dbm:
- raise error("GDBM object has already been closed")
+ raise error(0, "GDBM object has already been closed")
__del__ = close
@@ -159,7 +161,7 @@
elif flags[0] == 'n':
iflags = lib.GDBM_NEWDB
else:
- raise error("First flag must be one of 'r', 'w', 'c' or 'n'")
+ raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'")
for flag in flags[1:]:
if flag == 'f':
iflags |= lib.GDBM_FAST
@@ -168,7 +170,7 @@
elif flag == 'u':
iflags |= lib.GDBM_NOLOCK
else:
- raise error("Flag '%s' not supported" % flag)
+ raise error(0, "Flag '%s' not supported" % flag)
return gdbm(filename, iflags, mode)
open_flags = "rwcnfsu"
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -113,7 +113,7 @@
try:
for name in modlist:
__import__(name)
- except (ImportError, CompilationError, py.test.skip.Exception), e:
+ except (ImportError, CompilationError, py.test.skip.Exception) as e:
errcls = e.__class__.__name__
raise Exception(
"The module %r is disabled\n" % (modname,) +
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -101,7 +101,7 @@
while True:
try:
w_key = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise # re-raise other app-level exceptions
break
@@ -271,7 +271,7 @@
try:
...
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_XxxError):
raise
...
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -25,13 +25,13 @@
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
- try:
- import sphinx_rtd_theme
- html_theme = 'sphinx_rtd_theme'
- html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
- except ImportError:
- print('sphinx_rtd_theme is not installed')
- html_theme = 'default'
+ try:
+ import sphinx_rtd_theme
+ html_theme = 'sphinx_rtd_theme'
+ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+ except ImportError:
+ print('sphinx_rtd_theme is not installed')
+ html_theme = 'default'
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
diff --git a/pypy/doc/config/translation.log.txt b/pypy/doc/config/translation.log.txt
--- a/pypy/doc/config/translation.log.txt
+++ b/pypy/doc/config/translation.log.txt
@@ -2,4 +2,4 @@
These must be enabled by setting the PYPYLOG environment variable.
The exact set of features supported by PYPYLOG is described in
-pypy/translation/c/src/debug_print.h.
+rpython/translator/c/src/debug_print.h.
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -98,6 +98,7 @@
Stian Andreassen
Laurence Tratt
Wanja Saatkamp
+ Ivan Sichmann Freitas
Gerald Klix
Mike Blume
Oscar Nierstrasz
@@ -182,7 +183,9 @@
Alejandro J. Cura
Jacob Oscarson
Travis Francis Athougies
+ Ryan Gonzalez
Kristjan Valur Jonsson
+ Sebastian Pawluś
Neil Blakey-Milner
anatoly techtonik
Lutz Paelike
@@ -215,6 +218,7 @@
Michael Hudson-Doyle
Anders Sigfridsson
Yasir Suhail
+ rafalgalczynski at gmail.com
Floris Bruynooghe
Laurens Van Houtven
Akira Li
@@ -244,6 +248,8 @@
Zooko Wilcox-O Hearn
Tomer Chachamu
Christopher Groskopf
+ Asmo Soinio
+ Stefan Marr
jiaaro
opassembler.py
Antony Lee
diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst
--- a/pypy/doc/ctypes-implementation.rst
+++ b/pypy/doc/ctypes-implementation.rst
@@ -74,13 +74,11 @@
Here is a list of the limitations and missing features of the
current implementation:
-* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer
- of PyPy, at your own risks and without doing anything sensible about
- the GIL. Since PyPy 2.3, these functions are also named with an extra
- "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this,
- but it might more or less work in simple cases if you do. (Obviously,
- assuming the PyObject pointers you get have any particular fields in
- any particular order is just going to crash.)
+* ``ctypes.pythonapi`` is missing. In previous versions, it was present
+ and redirected to the `cpyext` C API emulation layer, but our
+ implementation did not do anything sensible about the GIL and the
+ functions were named with an extra "Py", for example
+ ``PyPyInt_FromLong()``. It was removed for being unhelpful.
* We copy Python strings instead of having pointers to raw buffers
diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst
--- a/pypy/doc/extradoc.rst
+++ b/pypy/doc/extradoc.rst
@@ -6,6 +6,9 @@
*Articles about PyPy published so far, most recent first:* (bibtex_ file)
+* `A Way Forward in Parallelising Dynamic Languages`_,
+ R. Meier, A. Rigo
+
* `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_,
C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo
@@ -69,6 +72,7 @@
.. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib
+.. _A Way Forward in Parallelising Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2014/position-paper.pdf
.. _Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf
.. _Allocation Removal by Partial Evaluation in a Tracing JIT: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf
.. _Towards a Jitting VM for Prolog Execution: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf
@@ -91,6 +95,11 @@
Talks and Presentations
-----------------------
+*This part is no longer updated.* The complete list is here__ (in
+alphabetical order).
+
+.. __: https://bitbucket.org/pypy/extradoc/src/extradoc/talk/
+
Talks in 2010
~~~~~~~~~~~~~
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -150,14 +150,18 @@
You might be interested in our `benchmarking site`_ and our
:doc:`jit documentation <rpython:jit/index>`.
-Note that the JIT has a very high warm-up cost, meaning that the
-programs are slow at the beginning. If you want to compare the timings
-with CPython, even relatively simple programs need to run *at least* one
-second, preferrably at least a few seconds. Large, complicated programs
-need even more time to warm-up the JIT.
+`Your tests are not a benchmark`_: tests tend to be slow under PyPy
+because they run exactly once; if they are good tests, they exercise
+various corner cases in your code. This is a bad case for JIT
+compilers. Note also that our JIT has a very high warm-up cost, meaning
+that any program is slow at the beginning. If you want to compare the
+timings with CPython, even relatively simple programs need to run *at
+least* one second, preferably at least a few seconds. Large,
+complicated programs need even more time to warm-up the JIT.
.. _benchmarking site: http://speed.pypy.org
+.. _your tests are not a benchmark: http://alexgaynor.net/2013/jul/15/your-tests-are-not-benchmark/
Couldn't the JIT dump and reload already-compiled machine code?
---------------------------------------------------------------
diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst
--- a/pypy/doc/jit-hooks.rst
+++ b/pypy/doc/jit-hooks.rst
@@ -34,7 +34,7 @@
aborted due to some reason.
The hook will be invoked with the siagnture:
- ``hook(jitdriver_name, greenkey, reason)``
+ ``hook(jitdriver_name, greenkey, reason, oplist)``
Reason is a string, the meaning of other arguments is the same
as attributes on JitLoopInfo object
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -95,13 +95,12 @@
``PYPYLOG``
If set to a non-empty value, enable logging, the format is:
- *fname*
+ *fname* or *+fname*
logging for profiling: includes all
``debug_start``/``debug_stop`` but not any nested
``debug_print``.
*fname* can be ``-`` to log to *stderr*.
- Note that using a : in fname is a bad idea, Windows
- users, beware.
+ The *+fname* form can be used if there is a *:* in fname.
``:``\ *fname*
Full logging, including ``debug_print``.
diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst
--- a/pypy/doc/objspace-proxies.rst
+++ b/pypy/doc/objspace-proxies.rst
@@ -24,6 +24,16 @@
Transparent Proxies
-------------------
+.. warning::
+
+ This is a feature that was tried experimentally long ago, and we
+ found no really good use cases. The basic functionality is still
+ there, but we don't recommend using it. Some of the examples below
+ might not work any more (e.g. you can't tproxy a list object any
+ more). The rest can be done by hacking in standard Python. If
+ anyone is interested in working on tproxy again, he is welcome, but
+ we don't regard this as an interesting extension.
+
PyPy's Transparent Proxies allow routing of operations on objects
to a callable. Application-level code can customize objects without
interfering with the type system - ``type(proxied_list) is list`` holds true
diff --git a/pypy/doc/release-2.3.1.rst b/pypy/doc/release-2.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-2.3.1.rst
@@ -0,0 +1,81 @@
+=================================================
+PyPy 2.3.1 - Terrestrial Arthropod Trap Revisited
+=================================================
+
+We're pleased to announce PyPy 2.3.1, a feature-and-bugfix improvement over our
+recent release last month.
+
+This release contains several bugfixes and enhancements.
+
+You can download the PyPy 2.3.1 release here:
+
+ http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project, and for those who donate to our three sub-projects.
+We've shown quite a bit of progress
+but we're slowly running out of funds.
+Please consider donating more, or even better convince your employer to donate,
+so we can finish those projects! The three sub-projects are:
+
+* `Py3k`_ (supporting Python 3.x): the release PyPy3 2.3 is imminent.
+
+* `STM`_ (software transactional memory): a preview will be released very soon,
+ once we fix a few bugs
+
+* `NumPy`_ which requires installation of our fork of upstream numpy, available `on bitbucket`_
+
+.. _`Py3k`: http://pypy.org/py3donate.html
+.. _`STM`: http://pypy.org/tmdonate2.html
+.. _`NumPy`: http://pypy.org/numpydonate.html
+.. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison;
+note that cpython's speed has not changed since 2.7.2)
+due to its integrated tracing JIT compiler.
+
+This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows,
+and OpenBSD,
+as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux.
+
+While we support 32 bit python on Windows, work on the native Windows 64
+bit python is still stalling, we would welcome a volunteer
+to `handle that`_.
+
+.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org
+.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation
+
+Highlights
+==========
+
+Issues with the 2.3 release were resolved after being reported by users to
+our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
+#pypy. Here is a summary of the user-facing changes;
+for more information see `whats-new`_:
+
+* The built-in ``struct`` module was renamed to ``_struct``, solving issues
+ with IDLE and other modules.
+
+* Support for compilation with gcc-4.9
+
+* A rewrite of packaging.py which produces our downloadable packages to
+ modernize command line argument handling and to document third-party
+ contributions in our LICENSE file
+
+* A CFFI-based version of the gdbm module is now included in our downloads
+
+* Many issues were resolved_ since the 2.3 release on May 8
+
+.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.1.html
+.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved
+Please try it out and let us know what you think. We especially welcome
+success stories, we know you are using PyPy, please tell us about it!
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/release-pypy3-2.3.1.rst b/pypy/doc/release-pypy3-2.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-pypy3-2.3.1.rst
@@ -0,0 +1,69 @@
+=====================
+PyPy3 2.3.1 - Fulcrum
+=====================
+
+We're pleased to announce the first stable release of PyPy3. PyPy3
+targets Python 3 (3.2.5) compatibility.
+
+We would like to thank all of the people who donated_ to the `py3k proposal`_
+for supporting the work that went into this.
+
+You can download the PyPy3 2.3.1 release here:
+
+ http://pypy.org/download.html#pypy3-2-3-1
+
+Highlights
+==========
+
+* The first stable release of PyPy3: support for Python 3!
+
+* The stdlib has been updated to Python 3.2.5
+
+* Additional support for the u'unicode' syntax (`PEP 414`_) from Python 3.3
+
+* Updates from the default branch, such as incremental GC and various JIT
+ improvements
+
+* Resolved some notable JIT performance regressions from PyPy2:
+
+ - Re-enabled the previously disabled collection (list/dict/set) strategies
+
+ - Resolved performance of iteration over range objects
+
+ - Resolved handling of Python 3's exception __context__ unnecessarily forcing
+ frame object overhead
+
+.. _`PEP 414`: http://legacy.python.org/dev/peps/pep-0414/
+
+What is PyPy?
+==============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7.6 or 3.2.5. It's fast due to its integrated tracing JIT compiler.
+
+This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows,
+and OpenBSD,
+as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux.
+
+While we support 32 bit python on Windows, work on the native Windows 64
+bit python is still stalling, we would welcome a volunteer
+to `handle that`_.
+
+.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation
+
+How to use PyPy?
+=================
+
+We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv
+installed, you can follow instructions from `pypy documentation`_ on how
+to proceed. This document also covers other `installation schemes`_.
+
+.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html
+.. _`py3k proposal`: http://pypy.org/py3donate.html
+.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv
+.. _`virtualenv`: http://www.virtualenv.org/en/latest/
+.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy
+
+
+Cheers,
+the PyPy team
diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst
--- a/pypy/doc/stm.rst
+++ b/pypy/doc/stm.rst
@@ -28,7 +28,8 @@
Introduction
============
-``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_
+``pypy-stm`` is a variant of the regular PyPy interpreter. (This
+version supports Python 2.7; see below for `Python 3`_.) With caveats_
listed below, it should be in theory within 20%-50% slower than a
regular PyPy, comparing the JIT version in both cases (but see below!).
It is called
@@ -92,9 +93,9 @@
We're busy fixing them as we find them; feel free to `report bugs`_.
* It runs with an overhead as low as 20% on examples like "richards".
- There are also other examples with higher overheads --up to 10x for
- "translate.py"-- which we are still trying to understand. One suspect
- is our partial GC implementation, see below.
+ There are also other examples with higher overheads --currently up to
+ 2x for "translate.py"-- which we are still trying to understand.
+ One suspect is our partial GC implementation, see below.
* Currently limited to 1.5 GB of RAM (this is just a parameter in
`core.h`__). Memory overflows are not correctly handled; they cause
@@ -111,9 +112,8 @@
* The GC is new; although clearly inspired by PyPy's regular GC, it
misses a number of optimizations for now. Programs allocating large
- numbers of small objects that don't immediately die, as well as
- programs that modify large lists or dicts, suffer from these missing
- optimizations.
+ numbers of small objects that don't immediately die (surely a common
+ situation) suffer from these missing optimizations.
* The GC has no support for destructors: the ``__del__`` method is never
called (including on file objects, which won't be closed for you).
@@ -138,6 +138,25 @@
+Python 3
+========
+
+In this document I describe "pypy-stm", which is based on PyPy's Python
+2.7 interpreter. Supporting Python 3 should take about half an
+afternoon of work. Obviously, what I *don't* mean is that by tomorrow
+you can have a finished and polished "pypy3-stm" product. General py3k
+work is still missing; and general stm work is also still missing. But
+they are rather independent from each other, as usual in PyPy. The
+required afternoon of work will certainly be done one of these days now
+that the internal interfaces seem to stabilize.
+
+The same is true for other languages implemented in the RPython
+framework, although the amount of work to put there might vary, because
+the STM framework within RPython is currently targeting the PyPy
+interpreter and other ones might have slightly different needs.
+
+
+
User Guide
==========
@@ -490,8 +509,6 @@
The last two lines are special; they are an internal marker read by
``transactional_memory.print_abort_info()``.
-These statistics are not printed out for the main thread, for now.
-
Reference to implementation details
-----------------------------------
diff --git a/pypy/doc/whatsnew-2.3.1.rst b/pypy/doc/whatsnew-2.3.1.rst
--- a/pypy/doc/whatsnew-2.3.1.rst
+++ b/pypy/doc/whatsnew-2.3.1.rst
@@ -9,5 +9,16 @@
Support compilation with gcc-4.9
-Fixes for issues #1769, #1764, #1762, #1752
+Added support for the stdlib gdbm module via cffi
+Annotator cleanups
+
+.. branch: release-2.3.x
+
+.. branch: unify-call-ops
+
+.. branch packaging
+Use argparse for packaging.py, and add third-party components to LICENSE file.
+Also mention that gdbm is GPL.
+Do not crash the packaging process on failure in CFFI or license-building,
+rather complete the build step and return -1.
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,11 +3,50 @@
=======================
.. this is a revision shortly after release-2.3.x
-.. startrev: b2cc67adbaad
+.. startrev: ca9b7cf02cf4
-Added support for the stdlib gdbm module via cffi
+.. branch: fix-bytearray-complexity
+Bytearray operations no longer copy the bytearray unnecessarily
-Annotator cleanups
+Added support for ``__getitem__``, ``__setitem__``, ``__getslice__``,
+``__setslice__``, and ``__len__`` to RPython
-.. branch: release-2.3.x
+.. branch: stringbuilder2-perf
+Give the StringBuilder a more flexible internal structure, with a
+chained list of strings instead of just one string. This makes it
+more efficient when building large strings, e.g. with cStringIO().
+Also, use systematically jit.conditional_call() instead of regular
+branches. This lets the JIT make more linear code, at the cost of
+forcing a bit more data (to be passed as arguments to
+conditional_calls). I would expect the net result to be a slight
+slow-down on some simple benchmarks and a speed-up on bigger
+programs.
+
+.. branch: ec-threadlocal
+Change the executioncontext's lookup to be done by reading a thread-
+local variable (which is implemented in C using '__thread' if
+possible, and pthread_getspecific() otherwise). On Linux x86 and
+x86-64, the JIT backend has a special optimization that lets it emit
+directly a single MOV from a %gs- or %fs-based address. It seems
+actually to give a good boost in performance.
+
+.. branch: fast-gil
+A faster way to handle the GIL, particularly in JIT code. The GIL is
+now a composite of two concepts: a global number (it's just set from
+1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there
+are threads waiting to acquire the GIL, one of them is actively
+checking the global number every 0.1 ms to 1 ms. Overall, JIT loops
+full of external function calls now run a bit faster (if no thread was
+started yet), or a *lot* faster (if threads were started already).
+
+.. branch: jit-get-errno
+Optimize the errno handling in the JIT, notably around external
+function calls. Linux-only.
+
+.. branch: disable_pythonapi
+Remove non-functioning ctypes.pythonapi and ctypes.PyDLL, document this
+incompatibility with cpython. Recast sys.dllhandle to an int.
+
+.. branch: scalar-operations
+Fix performance regression on ufunc(<scalar>, <scalar>) in numpy.
diff --git a/pypy/doc/whatsnew-pypy3-2.3.1.rst b/pypy/doc/whatsnew-pypy3-2.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/whatsnew-pypy3-2.3.1.rst
@@ -0,0 +1,6 @@
+=========================
+What's new in PyPy3 2.3.1
+=========================
+
+.. this is a revision shortly after pypy3-release-2.3.x
+.. startrev: 0137d8e6657d
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -30,8 +30,6 @@
if w_dict is not None: # for tests
w_entry_point = space.getitem(w_dict, space.wrap('entry_point'))
w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel'))
- w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish))
- w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup))
withjit = space.config.objspace.usemodules.pypyjit
def entry_point(argv):
@@ -53,7 +51,7 @@
argv = argv[:1] + argv[3:]
try:
try:
- space.call_function(w_run_toplevel, w_call_startup_gateway)
+ space.startup()
w_executable = space.wrap(argv[0])
w_argv = space.newlist([space.wrap(s) for s in argv[1:]])
w_exitcode = space.call_function(w_entry_point, w_executable, w_argv)
@@ -69,7 +67,7 @@
return 1
finally:
try:
- space.call_function(w_run_toplevel, w_call_finish_gateway)
+ space.finish()
except OperationError, e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
@@ -184,11 +182,6 @@
'pypy_thread_attach': pypy_thread_attach,
'pypy_setup_home': pypy_setup_home}
-def call_finish(space):
- space.finish()
-
-def call_startup(space):
- space.startup()
# _____ Define and setup target ___
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -395,6 +395,7 @@
def startup(self):
# To be called before using the space
+ self.threadlocals.enter_thread(self)
# Initialize already imported builtin modules
from pypy.interpreter.module import Module
@@ -639,30 +640,36 @@
"""NOT_RPYTHON: Abstract method that should put some minimal
content into the w_builtins."""
- @jit.loop_invariant
def getexecutioncontext(self):
"Return what we consider to be the active execution context."
# Important: the annotator must not see a prebuilt ExecutionContext:
# you should not see frames while you translate
# so we make sure that the threadlocals never *have* an
# ExecutionContext during translation.
- if self.config.translating and not we_are_translated():
- assert self.threadlocals.getvalue() is None, (
- "threadlocals got an ExecutionContext during translation!")
- try:
- return self._ec_during_translation
- except AttributeError:
- ec = self.createexecutioncontext()
- self._ec_during_translation = ec
+ if not we_are_translated():
+ if self.config.translating:
+ assert self.threadlocals.get_ec() is None, (
+ "threadlocals got an ExecutionContext during translation!")
+ try:
+ return self._ec_during_translation
+ except AttributeError:
+ ec = self.createexecutioncontext()
+ self._ec_during_translation = ec
+ return ec
+ else:
+ ec = self.threadlocals.get_ec()
+ if ec is None:
+ self.threadlocals.enter_thread(self)
+ ec = self.threadlocals.get_ec()
return ec
- # normal case follows. The 'thread' module installs a real
- # thread-local object in self.threadlocals, so this builds
- # and caches a new ec in each thread.
- ec = self.threadlocals.getvalue()
- if ec is None:
- ec = self.createexecutioncontext()
- self.threadlocals.setvalue(ec)
- return ec
+ else:
+ # translated case follows. self.threadlocals is either from
+ # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'.
+ # the result is assumed to be non-null: enter_thread() was called
+ # by space.startup().
+ ec = self.threadlocals.get_ec()
+ assert ec is not None
+ return ec
def _freeze_(self):
return True
@@ -963,6 +970,13 @@
"""
return self.unpackiterable(w_iterable, expected_length)
+ def listview_no_unpack(self, w_iterable):
+ """ Same as listview() if cheap. If 'w_iterable' is something like
+ a generator, for example, then return None instead.
+ May return None anyway.
+ """
+ return None
+
def listview_bytes(self, w_list):
""" Return a list of unwrapped strings out of a list of strings. If the
argument is not a list or does not contain only strings, return None.
@@ -1487,9 +1501,7 @@
return buf.as_str()
def str_or_None_w(self, w_obj):
- if self.is_w(w_obj, self.w_None):
- return None
- return self.str_w(w_obj)
+ return None if self.is_none(w_obj) else self.str_w(w_obj)
def str_w(self, w_obj):
return w_obj.str_w(self)
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -496,6 +496,13 @@
"""
+class UserDelCallback(object):
+ def __init__(self, w_obj, callback, descrname):
+ self.w_obj = w_obj
+ self.callback = callback
+ self.descrname = descrname
+ self.next = None
+
class UserDelAction(AsyncAction):
"""An action that invokes all pending app-level __del__() method.
This is done as an action instead of immediately when the
@@ -506,12 +513,18 @@
def __init__(self, space):
AsyncAction.__init__(self, space)
- self.dying_objects = []
+ self.dying_objects = None
+ self.dying_objects_last = None
self.finalizers_lock_count = 0
self.enabled_at_app_level = True
def register_callback(self, w_obj, callback, descrname):
- self.dying_objects.append((w_obj, callback, descrname))
+ cb = UserDelCallback(w_obj, callback, descrname)
+ if self.dying_objects_last is None:
+ self.dying_objects = cb
+ else:
+ self.dying_objects_last.next = cb
+ self.dying_objects_last = cb
self.fire()
def perform(self, executioncontext, frame):
@@ -525,13 +538,33 @@
# avoid too deep recursions of the kind of __del__ being called
# while in the middle of another __del__ call.
pending = self.dying_objects
- self.dying_objects = []
+ self.dying_objects = None
+ self.dying_objects_last = None
space = self.space
- for i in range(len(pending)):
- w_obj, callback, descrname = pending[i]
- pending[i] = (None, None, None)
+ while pending is not None:
try:
- callback(w_obj)
+ pending.callback(pending.w_obj)
except OperationError, e:
- e.write_unraisable(space, descrname, w_obj)
+ e.write_unraisable(space, pending.descrname, pending.w_obj)
e.clear(space) # break up reference cycles
+ pending = pending.next
+ #
+ # Note: 'dying_objects' used to be just a regular list instead
+ # of a chained list. This was the cause of "leaks" if we have a
+ # program that constantly creates new objects with finalizers.
+ # Here is why: say 'dying_objects' is a long list, and there
+ # are n instances in it. Then we spend some time in this
+ # function, possibly triggering more GCs, but keeping the list
+ # of length n alive. Then the list is suddenly freed at the
+ # end, and we return to the user program. At this point the
+ # GC limit is still very high, because just before, there was
+ # a list of length n alive. Assume that the program continues
+ # to allocate a lot of instances with finalizers. The high GC
+ # limit means that it could allocate a lot of instances before
+ # reaching it --- possibly more than n. So the whole procedure
+ # repeats with higher and higher values of n.
+ #
+ # This does not occur in the current implementation because
+ # there is no list of length n: if n is large, then the GC
+ # will run several times while walking the list, but it will
+ # see lower and lower memory usage, with no lower bound of n.
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -895,7 +895,7 @@
"use unwrap_spec(...=WrappedDefault(default))" % (
self._code.identifier, name, defaultval))
defs_w.append(None)
- else:
+ elif name != '__args__' and name != 'args_w':
defs_w.append(space.wrap(defaultval))
if self._code._unwrap_spec:
UNDEFINED = object()
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -61,6 +61,13 @@
return self.send_ex(w_arg)
def send_ex(self, w_arg, operr=None):
+ pycode = self.pycode
+ if jit.we_are_jitted() and should_not_inline(pycode):
+ generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg,
+ operr=operr, pycode=pycode)
+ return self._send_ex(w_arg, operr)
+
+ def _send_ex(self, w_arg, operr):
space = self.space
if self.running:
raise OperationError(space.w_ValueError,
@@ -72,8 +79,7 @@
if operr is None:
operr = OperationError(space.w_StopIteration, space.w_None)
raise operr
- # XXX it's not clear that last_instr should be promoted at all
- # but as long as it is necessary for call_assembler, let's do it early
+
last_instr = jit.promote(frame.last_instr)
if last_instr == -1:
if w_arg and not space.is_w(w_arg, space.w_None):
@@ -214,3 +220,38 @@
"interrupting generator of ")
break
block = block.previous
+
+
+
+def get_printable_location_genentry(bytecode):
+ return '%s <generator>' % (bytecode.get_repr(),)
+generatorentry_driver = jit.JitDriver(greens=['pycode'],
+ reds=['gen', 'w_arg', 'operr'],
+ get_printable_location =
+ get_printable_location_genentry,
+ name='generatorentry')
+
+from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap
+YIELD_VALUE = opmap['YIELD_VALUE']
+
+@jit.elidable_promote()
+def should_not_inline(pycode):
+ # Should not inline generators with more than one "yield",
+ # as an approximative fix (see issue #1782). There are cases
+ # where it slows things down; for example calls to a simple
+ # generator that just produces a few simple values with a few
+ # consecutive "yield" statements. It fixes the near-infinite
+ # slow-down in issue #1782, though...
+ count_yields = 0
+ code = pycode.co_code
+ n = len(code)
+ i = 0
+ while i < n:
+ c = code[i]
+ op = ord(c)
+ if op == YIELD_VALUE:
+ count_yields += 1
+ i += 1
+ if op >= HAVE_ARGUMENT:
+ i += 2
+ return count_yields >= 2
diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
--- a/pypy/interpreter/miscutils.py
+++ b/pypy/interpreter/miscutils.py
@@ -11,11 +11,14 @@
"""
_value = None
- def getvalue(self):
+ def get_ec(self):
return self._value
- def setvalue(self, value):
- self._value = value
+ def enter_thread(self, space):
+ self._value = space.createexecutioncontext()
+
+ def try_enter_thread(self, space):
+ return False
def signals_enabled(self):
return True
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -511,10 +511,10 @@
for i in range(min(len(varnames), self.getcode().co_nlocals)):
name = varnames[i]
w_value = self.locals_stack_w[i]
- w_name = self.space.wrap(name)
if w_value is not None:
- self.space.setitem(self.w_locals, w_name, w_value)
+ self.space.setitem_str(self.w_locals, name, w_value)
else:
+ w_name = self.space.wrap(name)
try:
self.space.delitem(self.w_locals, w_name)
except OperationError as e:
@@ -534,8 +534,7 @@
except ValueError:
pass
else:
- w_name = self.space.wrap(name)
- self.space.setitem(self.w_locals, w_name, w_value)
+ self.space.setitem_str(self.w_locals, name, w_value)
@jit.unroll_safe
More information about the pypy-commit
mailing list