From pypy.commits at gmail.com Sun May 1 00:33:27 2016
From: pypy.commits at gmail.com (mattip)
Date: Sat, 30 Apr 2016 21:33:27 -0700 (PDT)
Subject: [pypy-commit] pypy default: start release cycle
Message-ID: <57258717.08851c0a.27447.ffffb4e6@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84074:0b2e75889888
Date: 2016-05-01 07:29 +0300
http://bitbucket.org/pypy/pypy/changeset/0b2e75889888/
Log: start release cycle
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released a bugfix for PyPy 5.1, due to a regression_ in
+installing third-party packages dependent on numpy (using our numpy fork
+available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy which increased the memory required to translate. The
+improvement will be noticed by downstream packagers and those who translate
+rather than download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
--- a/pypy/tool/release/repackage.sh
+++ b/pypy/tool/release/repackage.sh
@@ -1,7 +1,7 @@
# Edit these appropriately before running this script
maj=5
min=1
-rev=0
+rev=1
branchname=release-$maj.x # ==OR== release-$maj.$min.x
tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev
From pypy.commits at gmail.com Sun May 1 00:33:28 2016
From: pypy.commits at gmail.com (mattip)
Date: Sat, 30 Apr 2016 21:33:28 -0700 (PDT)
Subject: [pypy-commit] pypy default: Added tag release-5.1.1 for changeset
b0a649e90b66
Message-ID: <57258718.22d8c20a.8d802.ffff9c51@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84075:ce68e84f6208
Date: 2016-05-01 07:32 +0300
http://bitbucket.org/pypy/pypy/changeset/ce68e84f6208/
Log: Added tag release-5.1.1 for changeset b0a649e90b66
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -21,3 +21,4 @@
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
From pypy.commits at gmail.com Sun May 1 01:32:21 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Sat, 30 Apr 2016 22:32:21 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-test-A: Make test_version compatible with
pytest.py -A.
Message-ID: <572594e5.10691c0a.b3a6a.ffffc408@mx.google.com>
Author: Devin Jeanpierre
Branch: cpyext-test-A
Changeset: r84076:7e8bd9d01613
Date: 2016-04-30 22:30 -0700
http://bitbucket.org/pypy/pypy/changeset/7e8bd9d01613/
Log: Make test_version compatible with pytest.py -A.
diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py
--- a/pypy/module/cpyext/test/test_version.py
+++ b/pypy/module/cpyext/test/test_version.py
@@ -1,4 +1,6 @@
-import py
+import sys
+
+import py, pytest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
@@ -22,8 +24,6 @@
PyModule_AddIntConstant(m, "py_major_version", PY_MAJOR_VERSION);
PyModule_AddIntConstant(m, "py_minor_version", PY_MINOR_VERSION);
PyModule_AddIntConstant(m, "py_micro_version", PY_MICRO_VERSION);
- PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION);
- PyModule_AddIntConstant(m, "pypy_version_num", PYPY_VERSION_NUM);
}
"""
module = self.import_module(name='foo', init=init)
@@ -31,6 +31,18 @@
assert module.py_major_version == sys.version_info.major
assert module.py_minor_version == sys.version_info.minor
assert module.py_micro_version == sys.version_info.micro
+
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
+ def test_pypy_versions(self):
+ import sys
+ init = """
+ if (Py_IsInitialized()) {
+ PyObject *m = Py_InitModule("foo", NULL);
+ PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION);
+ PyModule_AddIntConstant(m, "pypy_version_num", PYPY_VERSION_NUM);
+ }
+ """
+ module = self.import_module(name='foo', init=init)
v = sys.pypy_version_info
s = '%d.%d.%d' % (v[0], v[1], v[2])
if v.releaselevel != 'final':
From pypy.commits at gmail.com Sun May 1 02:19:34 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 30 Apr 2016 23:19:34 -0700 (PDT)
Subject: [pypy-commit] pypy share-cpyext-cpython-api: ignore
'result_borrowed' if 'result_is_ll', also for the purposes of sharing
Message-ID: <57259ff6.c9b0c20a.e1f64.ffffb5e4@mx.google.com>
Author: Armin Rigo
Branch: share-cpyext-cpython-api
Changeset: r84077:d520d25aa845
Date: 2016-05-01 08:19 +0200
http://bitbucket.org/pypy/pypy/changeset/d520d25aa845/
Log: ignore 'result_borrowed' if 'result_is_ll', also for the purposes of
sharing
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -727,10 +727,16 @@
assert not error_value # only support error=NULL
error_value = 0 # because NULL is not hashable
+ if callable.api_func.result_is_ll:
+ result_kind = "L"
+ elif callable.api_func.result_borrowed:
+ result_kind = "B" # note: 'result_borrowed' is ignored if we also
+ else: # say 'result_is_ll=True' (in this case it's
+ result_kind = "." # up to you to handle refcounting anyway)
+
signature = (tuple(argtypesw),
callable.api_func.restype,
- callable.api_func.result_borrowed,
- callable.api_func.result_is_ll,
+ result_kind,
error_value,
gil)
@@ -780,7 +786,7 @@
assert False
def make_wrapper_second_level(space, callable2name, argtypesw, restype,
- result_borrowed, result_is_ll, error_value, gil):
+ result_kind, error_value, gil):
from rpython.rlib import rgil
argtypes_enum_ui = unrolling_iterable(enumerate(argtypesw))
fatal_value = restype._defl()
@@ -885,12 +891,12 @@
elif is_PyObject(restype):
if is_pyobj(result):
- if not result_is_ll:
+ if result_kind != "L":
raise invalid("missing result_is_ll=True")
else:
- if result_is_ll:
+ if result_kind == "L":
raise invalid("result_is_ll=True but not ll PyObject")
- if result_borrowed:
+ if result_kind == "B": # borrowed
result = as_pyobj(space, result)
else:
result = make_ref(space, result)
From pypy.commits at gmail.com Sun May 1 02:24:27 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Sat, 30 Apr 2016 23:24:27 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-test-A: Fix test_hash to deal with
lazily-created ->hash on CPython.
Message-ID: <5725a11b.e7bec20a.de053.ffffb43d@mx.google.com>
Author: Devin Jeanpierre
Branch: cpyext-test-A
Changeset: r84078:1b3b372c1d9c
Date: 2016-04-30 22:42 -0700
http://bitbucket.org/pypy/pypy/changeset/1b3b372c1d9c/
Log: Fix test_hash to deal with lazily-created ->hash on CPython.
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -85,8 +85,11 @@
'''
),
])
- res = module.test_hash(u"xyz")
- assert res == hash(u'xyz')
+ obj = u'xyz'
+ # CPython in particular does not precompute ->hash, so we need to call
+ # hash() first.
+ expected_hash = hash(obj)
+ assert module.test_hash(obj) == expected_hash
def test_default_encoded_string(self):
module = self.import_extension('foo', [
From pypy.commits at gmail.com Sun May 1 02:24:29 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Sat, 30 Apr 2016 23:24:29 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-test-A: Skip test_thread's tests when
running tests against CPython.
Message-ID: <5725a11d.442cc20a.ce956.ffffb23e@mx.google.com>
Author: Devin Jeanpierre
Branch: cpyext-test-A
Changeset: r84079:8ada27896c7b
Date: 2016-04-30 22:58 -0700
http://bitbucket.org/pypy/pypy/changeset/8ada27896c7b/
Log: Skip test_thread's tests when running tests against CPython.
diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py
--- a/pypy/module/cpyext/test/test_thread.py
+++ b/pypy/module/cpyext/test/test_thread.py
@@ -1,9 +1,12 @@
-import py
+import sys
+
+import py, pytest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
class AppTestThread(AppTestCpythonExtensionBase):
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_get_thread_ident(self):
module = self.import_extension('foo', [
("get_thread_ident", "METH_NOARGS",
@@ -30,6 +33,7 @@
assert results[0][0] != results[1][0]
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_acquire_lock(self):
module = self.import_extension('foo', [
("test_acquire_lock", "METH_NOARGS",
@@ -53,13 +57,14 @@
])
module.test_acquire_lock()
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_release_lock(self):
module = self.import_extension('foo', [
("test_release_lock", "METH_NOARGS",
"""
#ifndef PyThread_release_lock
#error "seems we are not accessing PyPy's functions"
-#endif
+#endif
PyThread_type_lock lock = PyThread_allocate_lock();
PyThread_acquire_lock(lock, 1);
PyThread_release_lock(lock);
@@ -74,6 +79,7 @@
])
module.test_release_lock()
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_tls(self):
module = self.import_extension('foo', [
("create_key", "METH_NOARGS",
From pypy.commits at gmail.com Sun May 1 02:24:31 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Sat, 30 Apr 2016 23:24:31 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-test-A: Include datetime.h in
test_datetime, for CPython-compatibility.
Message-ID: <5725a11f.923f1c0a.86ef1.ffffc5eb@mx.google.com>
Author: Devin Jeanpierre
Branch: cpyext-test-A
Changeset: r84080:c914cf3f4816
Date: 2016-04-30 23:05 -0700
http://bitbucket.org/pypy/pypy/changeset/c914cf3f4816/
Log: Include datetime.h in test_datetime, for CPython-compatibility.
diff --git a/pypy/module/cpyext/test/test_datetime.py b/pypy/module/cpyext/test/test_datetime.py
--- a/pypy/module/cpyext/test/test_datetime.py
+++ b/pypy/module/cpyext/test/test_datetime.py
@@ -109,7 +109,7 @@
Py_RETURN_NONE;
"""
)
- ])
+ ], prologue='#include "datetime.h"\n')
import datetime
assert module.get_types() == (datetime.date,
datetime.datetime,
From pypy.commits at gmail.com Sun May 1 02:24:33 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Sat, 30 Apr 2016 23:24:33 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-test-A: Only use PyDictProxy_Check[Exact]
in PyPy, in test_dictobject.
Message-ID: <5725a121.876cc20a.6f1dd.ffffb55d@mx.google.com>
Author: Devin Jeanpierre
Branch: cpyext-test-A
Changeset: r84081:96d93cf8a09e
Date: 2016-04-30 23:12 -0700
http://bitbucket.org/pypy/pypy/changeset/96d93cf8a09e/
Log: Only use PyDictProxy_Check[Exact] in PyPy, in test_dictobject.
diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py
--- a/pypy/module/cpyext/test/test_dictobject.py
+++ b/pypy/module/cpyext/test/test_dictobject.py
@@ -181,6 +181,7 @@
if (!PyArg_ParseTuple(args, "O", &dict))
return NULL;
proxydict = PyDictProxy_New(dict);
+#ifdef PYPY_VERSION // PyDictProxy_Check[Exact] are PyPy-specific.
if (!PyDictProxy_Check(proxydict)) {
Py_DECREF(proxydict);
PyErr_SetNone(PyExc_ValueError);
@@ -191,6 +192,7 @@
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
+#endif // PYPY_VERSION
i = PyObject_Size(proxydict);
Py_DECREF(proxydict);
return PyLong_FromLong(i);
From pypy.commits at gmail.com Sun May 1 02:24:34 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Sat, 30 Apr 2016 23:24:34 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-test-A: Allow the initial refcount of ()
to be != 1 in test_tupleobject, for CPython-compatibility of the test.
Message-ID: <5725a122.a60ac20a.71f16.ffffb197@mx.google.com>
Author: Devin Jeanpierre
Branch: cpyext-test-A
Changeset: r84082:dbc56228353b
Date: 2016-04-30 23:22 -0700
http://bitbucket.org/pypy/pypy/changeset/dbc56228353b/
Log: Allow the initial refcount of () to be != 1 in test_tupleobject, for
CPython-compatibility of the test.
diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py
--- a/pypy/module/cpyext/test/test_tupleobject.py
+++ b/pypy/module/cpyext/test/test_tupleobject.py
@@ -84,7 +84,14 @@
"""
PyObject *item = PyTuple_New(0);
PyObject *t = PyTuple_New(1);
- if (t->ob_refcnt != 1 || item->ob_refcnt != 1) {
+#ifdef PYPY_VERSION
+ // PyPy starts even empty tuples with a refcount of 1.
+ const int initial_item_refcount = 1;
+#else
+ // CPython can cache ().
+ const int initial_item_refcount = item->ob_refcnt;
+#endif // PYPY_VERSION
+ if (t->ob_refcnt != 1 || item->ob_refcnt != initial_item_refcount) {
PyErr_SetString(PyExc_SystemError, "bad initial refcnt");
return NULL;
}
@@ -94,8 +101,8 @@
PyErr_SetString(PyExc_SystemError, "SetItem: t refcnt != 1");
return NULL;
}
- if (item->ob_refcnt != 1) {
- PyErr_SetString(PyExc_SystemError, "SetItem: item refcnt != 1");
+ if (item->ob_refcnt != initial_item_refcount) {
+ PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != initial_item_refcount");
return NULL;
}
@@ -109,8 +116,8 @@
PyErr_SetString(PyExc_SystemError, "GetItem: t refcnt != 1");
return NULL;
}
- if (item->ob_refcnt != 1) {
- PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != 1");
+ if (item->ob_refcnt != initial_item_refcount) {
+ PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != initial_item_refcount");
return NULL;
}
return t;
From pypy.commits at gmail.com Sun May 1 02:32:53 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Sat, 30 Apr 2016 23:32:53 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-test-A: ifdef-out a weird tp_basicsize
test in CPython.
Message-ID: <5725a315.cf8ec20a.1afa0.ffffb4d8@mx.google.com>
Author: Devin Jeanpierre
Branch: cpyext-test-A
Changeset: r84083:a80c3c091bfa
Date: 2016-04-30 23:31 -0700
http://bitbucket.org/pypy/pypy/changeset/a80c3c091bfa/
Log: ifdef-out a weird tp_basicsize test in CPython.
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -24,8 +24,11 @@
if(PyUnicode_GetSize(s) != 11) {
result = -PyUnicode_GetSize(s);
}
+#ifdef PYPY_VERSION
+ // Slightly silly test that tp_basicsize is reasonable.
if(s->ob_type->tp_basicsize != sizeof(void*)*7)
result = s->ob_type->tp_basicsize;
+#endif // PYPY_VERSION
Py_DECREF(s);
return PyLong_FromLong(result);
"""),
From pypy.commits at gmail.com Sun May 1 02:34:53 2016
From: pypy.commits at gmail.com (mattip)
Date: Sat, 30 Apr 2016 23:34:53 -0700 (PDT)
Subject: [pypy-commit] pypy.org extradoc: update hashes (without
regenerating)
Message-ID: <5725a38d.d2aa1c0a.1ecec.5d36@mx.google.com>
Author: Matti Picus
Branch: extradoc
Changeset: r742:62e826dccf0c
Date: 2016-05-01 09:34 +0300
http://bitbucket.org/pypy/pypy.org/changeset/62e826dccf0c/
Log: update hashes (without regenerating)
diff --git a/source/download.txt b/source/download.txt
--- a/source/download.txt
+++ b/source/download.txt
@@ -10,16 +10,17 @@
There are `nightly binary builds`_ available. Those builds are not always
as stable as the release, but they contain numerous bugfixes and
- performance improvements.
+ performance improvements.
We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for:
-* the Python2.7 compatible release — **PyPy 5.1** — (`what's new in PyPy 5.1?`_)
+* the Python2.7 compatible release — **PyPy 5.1.1** — (`what's new in PyPy 5.1?`_ and `what's new in PyPy 5.1.1?`_ )
* the Python3.2.5 compatible release — **PyPy3 2.4.0** — (`what's new in PyPy3 2.4.0?`_).
* the Python2.7 Software Transactional Memory special release — **PyPy-STM 2.5.1** (Linux x86-64 only)
.. _what's new in PyPy 5.1?: http://doc.pypy.org/en/latest/release-5.1.0.html
+.. _what's new in PyPy 5.1.1?: http://doc.pypy.org/en/latest/release-5.1.1.html
.. _what's new in PyPy3 2.4.0?: http://doc.pypy.org/en/latest/release-pypy3-2.4.0.html
@@ -55,7 +56,7 @@
that **Linux binaries are only usable on the distributions written next to
them** unless you're ready to hack your system by adding symlinks to the
libraries it tries to open. There are better solutions:
-
+
* use Squeaky's `portable Linux binaries`_.
* or download PyPy from your release vendor (usually an outdated
@@ -91,17 +92,17 @@
* `All our downloads,`__ including previous versions. We also have a
mirror_, but please use only if you have troubles accessing the links above
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux64.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux-armhf-raspbian.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux-armhf-raring.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux-armel.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-osx64.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-win32.zip
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0++-ppc64.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0+-ppc64le.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-src.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-src.zip
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-linux.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-linux64.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-linux-armhf-raspbian.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-linux-armhf-raring.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-linux-armel.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-osx64.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-win32.zip
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1++-ppc64.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1+-ppc64le.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-src.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-src.zip
.. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582
.. __: https://bitbucket.org/pypy/pypy/downloads
.. _mirror: http://buildbot.pypy.org/mirror/
@@ -201,7 +202,7 @@
uncompressed, they run in-place. For now you can uncompress them
either somewhere in your home directory or, say, in ``/opt``, and
if you want, put a symlink from somewhere like
-``/usr/local/bin/pypy`` to ``/path/to/pypy-5.1.0/bin/pypy``. Do
+``/usr/local/bin/pypy`` to ``/path/to/pypy-5.1.1/bin/pypy``. Do
not move or copy the executable ``pypy`` outside the tree --- put
a symlink to it, otherwise it will not find its libraries.
@@ -231,10 +232,6 @@
If you have pip::
pypy -m pip install git+https://bitbucket.org/pypy/numpy.git
- pypy -m pip install git+https://bitbucket.org/pypy/numpy.git at pypy-5.1
-
-(the second version selects a particular tag, which may be needed if your
-pypy is not the latest development version.)
Alternatively, the direct way::
@@ -330,7 +327,7 @@
you first need to refer to the `Windows build instructions`_. More
precisely, translation on 32-bit takes at this point 2.7 GB if PyPy is
used and 2.9 GB if CPython is used. There are two workarounds:
-
+
1. use PyPy, not CPython. If you don't have any PyPy so far, not even
an older version, then you need to build one first, with some parts
removed. So, first translate with ``...rpython -Ojit
@@ -390,7 +387,7 @@
anyway, note an easy-to-miss point: some modules are written with CFFI,
and require some compilation. If you install PyPy as root without
pre-compiling them, normal users will get errors:
-
+
* PyPy 2.5.1 or earlier: normal users would see permission errors.
Installers need to run ``pypy -c "import gdbm"`` and other similar
commands at install time; the exact list is in `package.py`_. Users
@@ -415,6 +412,19 @@
Here are the checksums for each of the downloads
+pypy-5.1.1 md5::
+
+ 3fa98eb80ef5caa5a6f9d4468409a632 pypy-5.1.1-linux64.tar.bz2
+ 1d5874f076d18ecd4fd50054cca0c383 pypy-5.1.1-linux-armel.tar.bz2
+ 9e47e370d57293074bbef6c4c0c4736d pypy-5.1.1-linux-armhf-raring.tar.bz2
+ b6643215abc92ed8efd94e6205305a36 pypy-5.1.1-linux-armhf-raspbian.tar.bz2
+ 224e4d5870d88fb444d8f4f1791140e5 pypy-5.1.1-linux.tar.bz2
+ e35510b39e34f1c2199c283bf8655e5c pypy-5.1.1-osx64.tar.bz2
+ 9d8b82448416e0203efa325364f759e8 pypy-5.1.1-s390x.tar.bz2
+ 7aff685c28941fda6a74863c53931e38 pypy-5.1.1-src.tar.bz2
+ ee9795d8638d34126ca24e4757a73056 pypy-5.1.1-src.zip
+ d70b4385fbf0a5e5260f6b7bedb231d4 pypy-5.1.1-win32.zip
+
pypy-5.1.0 md5::
17baf9db5200559b9d6c45ec8f60ea48 pypy-5.1.0-linux-armel.tar.bz2
@@ -430,23 +440,36 @@
pypy3-2.4.0 md5::
- eadbc9790823fc0ae40c943087cd7cb3 pypy3-2.4.0-linux64.tar.bz2
- 7ab84727da2d5363866907f2f7921d86 pypy3-2.4.0-linux-armel.tar.bz2
- 83158d3a55ca134b179ef01dc2bb6a30 pypy3-2.4.0-linux-armhf-raring.tar.bz2
- b0b81cfa46e970c584bda10feebe1a85 pypy3-2.4.0-linux-armhf-raspbian.tar.bz2
- 68af7a6ca5948a1448a4b9c839d1472c pypy3-2.4.0-linux.tar.bz2
- c6cd12602469446db1dfa1e2bc6c699c pypy3-2.4.0-osx64.tar.bz2
- 8514f16b1a6262828e824bd8b37607db pypy3-2.4.0-win32.zip
- 96ba72916114d16904e12562b5d84e51 pypy3-2.4.0-src.tar.bz2
- c58015d0d3e08a9f24b93b8edca26d4d pypy3-2.4.0-src.zip
+ eadbc9790823fc0ae40c943087cd7cb3 pypy3-2.4.0-linux64.tar.bz2
+ 7ab84727da2d5363866907f2f7921d86 pypy3-2.4.0-linux-armel.tar.bz2
+ 83158d3a55ca134b179ef01dc2bb6a30 pypy3-2.4.0-linux-armhf-raring.tar.bz2
+ b0b81cfa46e970c584bda10feebe1a85 pypy3-2.4.0-linux-armhf-raspbian.tar.bz2
+ 68af7a6ca5948a1448a4b9c839d1472c pypy3-2.4.0-linux.tar.bz2
+ c6cd12602469446db1dfa1e2bc6c699c pypy3-2.4.0-osx64.tar.bz2
+ 8514f16b1a6262828e824bd8b37607db pypy3-2.4.0-win32.zip
+ 96ba72916114d16904e12562b5d84e51 pypy3-2.4.0-src.tar.bz2
+ c58015d0d3e08a9f24b93b8edca26d4d pypy3-2.4.0-src.zip
pypy-1.8 sandbox md5::
- 2c9f0054f3b93a6473f10be35277825a pypy-1.8-sandbox-linux64.tar.bz2
- 009c970b5fa75754ae4c32a5d108a8d4 pypy-1.8-sandbox-linux.tar.bz2
+ 2c9f0054f3b93a6473f10be35277825a pypy-1.8-sandbox-linux64.tar.bz2
+ 009c970b5fa75754ae4c32a5d108a8d4 pypy-1.8-sandbox-linux.tar.bz2
+pypy-5.1.1 sha1::
+
+ 9ffc1fe9dfeec77a705b0d1af257da7e87894f5a pypy-5.1.1-linux64.tar.bz2
+ e432b157bc4cd2b5a21810ff45fd9a1507e8b8bf pypy-5.1.1-linux-armel.tar.bz2
+ 5ed85f83566a4de5838c8b549943cb79250386ad pypy-5.1.1-linux-armhf-raring.tar.bz2
+ ddd1c20e049fcbc01f2bd9173ad77033540722a9 pypy-5.1.1-linux-armhf-raspbian.tar.bz2
+ 6767056bb71081bce8fcee04de0d0be02d71d4f9 pypy-5.1.1-linux.tar.bz2
+ 734eb82489d57a3b2b55d6b83153b3972dc6781d pypy-5.1.1-osx64.tar.bz2
+ 2440d613430f9dfc57bc8db5cfd087f1169ee2d0 pypy-5.1.1-s390x.tar.bz2
+ 34eca157e025e65f9dc1f419fa56ce31ad635e9c pypy-5.1.1-src.tar.bz2
+ 95596b62cf2bb6ebd4939584040e713ceec9ef0a pypy-5.1.1-src.zip
+ 3694e37c1cf6a2a938c108ee69126e4f40a0886e pypy-5.1.1-win32.zip
+
pypy-5.1.0 sha1::
114d4f981956b83cfbc0a3c819fdac0b0550cd82 pypy-5.1.0-linux-armel.tar.bz2
@@ -460,6 +483,19 @@
a184ef5ada93d53e8dc4a9850a9ed764bd661d7b pypy-5.1.0-src.zip
4daba0932afcc4755d93d55aa3cbdd851da9198d pypy-5.1.0-win32.zip
+pypy-5.1.1 sha256::
+
+ c852622e8bc81618c137da35fcf57b2349b956c07b6fd853300846e3cefa64fc pypy-5.1.1-linux64.tar.bz2
+ 062b33641c24dfc8c6b5af955c2ddf3815b471de0af4bfc343020651b94d13bf pypy-5.1.1-linux-armel.tar.bz2
+ c4bcdabccd15669ea44d1c715cd36b2ca55b340a27b63e1a92ef5ab6eb158a8d pypy-5.1.1-linux-armhf-raring.tar.bz2
+ fc2a1f8719a7eca5d85d0bdcf499c6ab7409fc32aa312435bcbe66950b47e863 pypy-5.1.1-linux-armhf-raspbian.tar.bz2
+ 7951fd2b87c9e621ec57c932c20da2b8a4a9e87d8daeb9e2b7373f9444219abc pypy-5.1.1-linux.tar.bz2
+ fe2bbb7cf95eb91b1724029f81e85d1dbb6025a2e9a005cfe7258fe07602f771 pypy-5.1.1-osx64.tar.bz2
+ 4acd1066e07eb668665b302bf8e9338b6df136082c5ce28c62b70c6bb1b5cf9f pypy-5.1.1-s390x.tar.bz2
+ 99aff0c710c46903b821c7c436f9cb9de16bd7370d923f99cc7c28a66be6c5b2 pypy-5.1.1-src.tar.bz2
+ 7c0c5157e7977674aa942de3c20ff0567f7af986824f6674e2424f6089c41501 pypy-5.1.1-src.zip
+ 22a780e328ef053e098f2edc2302957ac3119adf7bf11ff23e225931806e7bcd pypy-5.1.1-win32.zip
+
pypy-5.1.0 sha256::
ea7017449ff0630431866423220c3688fc55c1a0b80a96af0ae138dd0751b81c pypy-5.1.0-linux-armel.tar.bz2
From pypy.commits at gmail.com Sun May 1 02:36:50 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 30 Apr 2016 23:36:50 -0700 (PDT)
Subject: [pypy-commit] pypy share-cpyext-cpython-api: Close branch,
ready for merge (well, the tests don't seem to fail more
Message-ID: <5725a402.e109c20a.8d89a.ffffb1d6@mx.google.com>
Author: Armin Rigo
Branch: share-cpyext-cpython-api
Changeset: r84084:42e5e6b5fd26
Date: 2016-05-01 08:34 +0200
http://bitbucket.org/pypy/pypy/changeset/42e5e6b5fd26/
Log: Close branch, ready for merge (well, the tests don't seem to fail
more than on default...)
From pypy.commits at gmail.com Sun May 1 02:36:52 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 30 Apr 2016 23:36:52 -0700 (PDT)
Subject: [pypy-commit] pypy default: hg merge share-cpyext-cpython-api
Message-ID: <5725a404.c30a1c0a.18ac.ffffd03f@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84085:653a1c24d024
Date: 2016-05-01 08:36 +0200
http://bitbucket.org/pypy/pypy/changeset/653a1c24d024/
Log: hg merge share-cpyext-cpython-api
Share the ~one thousand @cpython_api function wrappers, according to
the signature. This reduces the number to ~200 or 250, and this
alone seems to give a more than 10% size win on the final pypy-c
(measured without the JIT). This should cancel the effect of the
size boost from 'cpyext-for-merge'.
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -10,6 +10,7 @@
from rpython.rtyper.lltypesystem import ll2ctypes
from rpython.rtyper.annlowlevel import llhelper
from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here
+from rpython.rlib.objectmodel import dont_inline
from rpython.translator import cdir
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.gensupp import NameManager
@@ -255,7 +256,7 @@
class ApiFunction:
def __init__(self, argtypes, restype, callable, error=_NOT_SPECIFIED,
- c_name=None, gil=None, result_borrowed=False):
+ c_name=None, gil=None, result_borrowed=False, result_is_ll=False):
self.argtypes = argtypes
self.restype = restype
self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype))
@@ -276,6 +277,9 @@
assert len(self.argnames) == len(self.argtypes)
self.gil = gil
self.result_borrowed = result_borrowed
+ self.result_is_ll = result_is_ll
+ if result_is_ll: # means 'returns a low-level PyObject pointer'
+ assert is_PyObject(restype)
#
def get_llhelper(space):
return llhelper(self.functype, self.get_wrapper(space))
@@ -297,7 +301,7 @@
DEFAULT_HEADER = 'pypy_decl.h'
def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER,
- gil=None, result_borrowed=False):
+ gil=None, result_borrowed=False, result_is_ll=False):
"""
Declares a function to be exported.
- `argtypes`, `restype` are lltypes and describe the function signature.
@@ -336,7 +340,8 @@
c_name = func_name
api_function = ApiFunction(argtypes, restype, func, error,
c_name=c_name, gil=gil,
- result_borrowed=result_borrowed)
+ result_borrowed=result_borrowed,
+ result_is_ll=result_is_ll)
func.api_func = api_function
if error is _NOT_SPECIFIED:
@@ -612,6 +617,9 @@
def is_PyObject(TYPE):
if not isinstance(TYPE, lltype.Ptr):
return False
+ if TYPE == PyObject:
+ return True
+ assert not isinstance(TYPE.TO, lltype.ForwardReference)
return hasattr(TYPE.TO, 'c_ob_refcnt') and hasattr(TYPE.TO, 'c_ob_type')
# a pointer to PyObject
@@ -668,37 +676,158 @@
pypy_debug_catch_fatal_exception = rffi.llexternal('pypy_debug_catch_fatal_exception', [], lltype.Void)
+
+# ____________________________________________________________
+
+
+class WrapperCache(object):
+ def __init__(self, space):
+ self.space = space
+ self.wrapper_gens = {} # {signature: WrapperGen()}
+ self.stats = [0, 0]
+
+class WrapperGen(object):
+ wrapper_second_level = None
+
+ def __init__(self, space, signature):
+ self.space = space
+ self.signature = signature
+ self.callable2name = []
+
+ def make_wrapper(self, callable):
+ self.callable2name.append((callable, callable.__name__))
+ if self.wrapper_second_level is None:
+ self.wrapper_second_level = make_wrapper_second_level(
+ self.space, self.callable2name, *self.signature)
+ wrapper_second_level = self.wrapper_second_level
+
+ def wrapper(*args):
+ # no GC here, not even any GC object
+ args += (callable,)
+ return wrapper_second_level(*args)
+
+ wrapper.__name__ = "wrapper for %r" % (callable, )
+ return wrapper
+
+
# Make the wrapper for the cases (1) and (2)
def make_wrapper(space, callable, gil=None):
"NOT_RPYTHON"
+ # This logic is obscure, because we try to avoid creating one
+ # big wrapper() function for every callable. Instead we create
+ # only one per "signature".
+
+ argnames = callable.api_func.argnames
+ argtypesw = zip(callable.api_func.argtypes,
+ [_name.startswith("w_") for _name in argnames])
+ error_value = getattr(callable.api_func, "error_value", CANNOT_FAIL)
+ if (isinstance(callable.api_func.restype, lltype.Ptr)
+ and error_value is not CANNOT_FAIL):
+ assert lltype.typeOf(error_value) == callable.api_func.restype
+ assert not error_value # only support error=NULL
+ error_value = 0 # because NULL is not hashable
+
+ if callable.api_func.result_is_ll:
+ result_kind = "L"
+ elif callable.api_func.result_borrowed:
+ result_kind = "B" # note: 'result_borrowed' is ignored if we also
+ else: # say 'result_is_ll=True' (in this case it's
+ result_kind = "." # up to you to handle refcounting anyway)
+
+ signature = (tuple(argtypesw),
+ callable.api_func.restype,
+ result_kind,
+ error_value,
+ gil)
+
+ cache = space.fromcache(WrapperCache)
+ cache.stats[1] += 1
+ try:
+ wrapper_gen = cache.wrapper_gens[signature]
+ except KeyError:
+ print signature
+ wrapper_gen = cache.wrapper_gens[signature] = WrapperGen(space,
+ signature)
+ cache.stats[0] += 1
+ print 'Wrapper cache [wrappers/total]:', cache.stats
+ return wrapper_gen.make_wrapper(callable)
+
+
+ at dont_inline
+def deadlock_error(funcname):
+ fatalerror_notb("GIL deadlock detected when a CPython C extension "
+ "module calls '%s'" % (funcname,))
+
+ at dont_inline
+def no_gil_error(funcname):
+ fatalerror_notb("GIL not held when a CPython C extension "
+ "module calls '%s'" % (funcname,))
+
+ at dont_inline
+def not_supposed_to_fail(funcname):
+ raise SystemError("The function '%s' was not supposed to fail"
+ % (funcname,))
+
+ at dont_inline
+def unexpected_exception(funcname, e, tb):
+ print 'Fatal error in cpyext, CPython compatibility layer, calling',funcname
+ print 'Either report a bug or consider not using this particular extension'
+ if not we_are_translated():
+ if tb is None:
+ tb = sys.exc_info()[2]
+ import traceback
+ traceback.print_exc()
+ if sys.stdout == sys.__stdout__:
+ import pdb; pdb.post_mortem(tb)
+ # we can't do much here, since we're in ctypes, swallow
+ else:
+ print str(e)
+ pypy_debug_catch_fatal_exception()
+ assert False
+
+def make_wrapper_second_level(space, callable2name, argtypesw, restype,
+ result_kind, error_value, gil):
from rpython.rlib import rgil
- names = callable.api_func.argnames
- argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes,
- [name.startswith("w_") for name in names])))
- fatal_value = callable.api_func.restype._defl()
+ argtypes_enum_ui = unrolling_iterable(enumerate(argtypesw))
+ fatal_value = restype._defl()
gil_acquire = (gil == "acquire" or gil == "around")
gil_release = (gil == "release" or gil == "around")
pygilstate_ensure = (gil == "pygilstate_ensure")
pygilstate_release = (gil == "pygilstate_release")
assert (gil is None or gil_acquire or gil_release
or pygilstate_ensure or pygilstate_release)
- deadlock_error = ("GIL deadlock detected when a CPython C extension "
- "module calls %r" % (callable.__name__,))
- no_gil_error = ("GIL not held when a CPython C extension "
- "module calls %r" % (callable.__name__,))
+ expected_nb_args = len(argtypesw) + pygilstate_ensure
- @specialize.ll()
- def wrapper(*args):
+ if isinstance(restype, lltype.Ptr) and error_value == 0:
+ error_value = lltype.nullptr(restype.TO)
+ if error_value is not CANNOT_FAIL:
+ assert lltype.typeOf(error_value) == lltype.typeOf(fatal_value)
+
+ def invalid(err):
+ "NOT_RPYTHON: translation-time crash if this ends up being called"
+ raise ValueError(err)
+ invalid.__name__ = 'invalid_%s' % (callable2name[0][1],)
+
+ def nameof(callable):
+ for c, n in callable2name:
+ if c is callable:
+ return n
+ return ''
+ nameof._dont_inline_ = True
+
+ def wrapper_second_level(*args):
from pypy.module.cpyext.pyobject import make_ref, from_ref, is_pyobj
from pypy.module.cpyext.pyobject import as_pyobj
# we hope that malloc removal removes the newtuple() that is
# inserted exactly here by the varargs specializer
+ callable = args[-1]
+ args = args[:-1]
# see "Handling of the GIL" above (careful, we don't have the GIL here)
tid = rthread.get_or_make_ident()
if gil_acquire:
if cpyext_glob_tid_ptr[0] == tid:
- fatalerror_notb(deadlock_error)
+ deadlock_error(nameof(callable))
rgil.acquire()
assert cpyext_glob_tid_ptr[0] == 0
elif pygilstate_ensure:
@@ -711,7 +840,7 @@
args += (pystate.PyGILState_UNLOCKED,)
else:
if cpyext_glob_tid_ptr[0] != tid:
- fatalerror_notb(no_gil_error)
+ no_gil_error(nameof(callable))
cpyext_glob_tid_ptr[0] = 0
rffi.stackcounter.stacks_counter += 1
@@ -722,8 +851,7 @@
try:
if not we_are_translated() and DEBUG_WRAPPER:
print >>sys.stderr, callable,
- assert len(args) == (len(callable.api_func.argtypes) +
- pygilstate_ensure)
+ assert len(args) == expected_nb_args
for i, (typ, is_wrapped) in argtypes_enum_ui:
arg = args[i]
if is_PyObject(typ) and is_wrapped:
@@ -757,41 +885,31 @@
failed = False
if failed:
- error_value = callable.api_func.error_value
if error_value is CANNOT_FAIL:
- raise SystemError("The function '%s' was not supposed to fail"
- % (callable.__name__,))
+ raise not_supposed_to_fail(nameof(callable))
retval = error_value
- elif is_PyObject(callable.api_func.restype):
+ elif is_PyObject(restype):
if is_pyobj(result):
- retval = result
+ if result_kind != "L":
+ raise invalid("missing result_is_ll=True")
else:
- if result is not None:
- if callable.api_func.result_borrowed:
- retval = as_pyobj(space, result)
- else:
- retval = make_ref(space, result)
- retval = rffi.cast(callable.api_func.restype, retval)
+ if result_kind == "L":
+ raise invalid("result_is_ll=True but not ll PyObject")
+ if result_kind == "B": # borrowed
+ result = as_pyobj(space, result)
else:
- retval = lltype.nullptr(PyObject.TO)
- elif callable.api_func.restype is not lltype.Void:
- retval = rffi.cast(callable.api_func.restype, result)
+ result = make_ref(space, result)
+ retval = rffi.cast(restype, result)
+
+ elif restype is not lltype.Void:
+ retval = rffi.cast(restype, result)
+
except Exception, e:
- print 'Fatal error in cpyext, CPython compatibility layer, calling', callable.__name__
- print 'Either report a bug or consider not using this particular extension'
- if not we_are_translated():
- if tb is None:
- tb = sys.exc_info()[2]
- import traceback
- traceback.print_exc()
- if sys.stdout == sys.__stdout__:
- import pdb; pdb.post_mortem(tb)
- # we can't do much here, since we're in ctypes, swallow
- else:
- print str(e)
- pypy_debug_catch_fatal_exception()
- assert False
+ unexpected_exception(nameof(callable), e, tb)
+ return fatal_value
+
+ assert lltype.typeOf(retval) == restype
rffi.stackcounter.stacks_counter -= 1
# see "Handling of the GIL" above
@@ -808,9 +926,9 @@
cpyext_glob_tid_ptr[0] = tid
return retval
- callable._always_inline_ = 'try'
- wrapper.__name__ = "wrapper for %r" % (callable, )
- return wrapper
+
+ wrapper_second_level._dont_inline_ = True
+ return wrapper_second_level
def process_va_name(name):
return name.replace('*', '_star')
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -124,7 +124,7 @@
#_______________________________________________________________________
- at cpython_api([CONST_STRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_STRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyString_FromStringAndSize(space, char_p, length):
if char_p:
s = rffi.charpsize2str(char_p, length)
diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py
--- a/pypy/module/cpyext/frameobject.py
+++ b/pypy/module/cpyext/frameobject.py
@@ -67,7 +67,8 @@
track_reference(space, py_obj, w_obj)
return w_obj
- at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject)
+ at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject,
+ result_is_ll=True)
def PyFrame_New(space, tstate, w_code, w_globals, w_locals):
typedescr = get_typedescr(PyFrame.typedef)
py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef))
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -34,11 +34,11 @@
def PyObject_Free(space, ptr):
lltype.free(ptr, flavor='raw')
- at cpython_api([PyTypeObjectPtr], PyObject)
+ at cpython_api([PyTypeObjectPtr], PyObject, result_is_ll=True)
def _PyObject_New(space, type):
return _PyObject_NewVar(space, type, 0)
- at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def _PyObject_NewVar(space, type, itemcount):
w_type = from_ref(space, rffi.cast(PyObject, type))
assert isinstance(w_type, W_TypeObject)
@@ -63,7 +63,7 @@
if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE:
Py_DecRef(space, rffi.cast(PyObject, pto))
- at cpython_api([PyTypeObjectPtr], PyObject)
+ at cpython_api([PyTypeObjectPtr], PyObject, result_is_ll=True)
def _PyObject_GC_New(space, type):
return _PyObject_New(space, type)
@@ -193,7 +193,7 @@
space.delitem(w_obj, w_key)
return 0
- at cpython_api([PyObject, PyTypeObjectPtr], PyObject)
+ at cpython_api([PyObject, PyTypeObjectPtr], PyObject, result_is_ll=True)
def PyObject_Init(space, obj, type):
"""Initialize a newly-allocated object op with its type and initial
reference. Returns the initialized object. If type indicates that the
@@ -207,7 +207,7 @@
obj.c_ob_refcnt = 1
return obj
- at cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def PyObject_InitVar(space, py_obj, type, size):
"""This does everything PyObject_Init() does, and also initializes the
length information for a variable-size object."""
@@ -308,7 +308,7 @@
w_res = PyObject_RichCompare(space, ref1, ref2, opid)
return int(space.is_true(w_res))
- at cpython_api([PyObject], PyObject)
+ at cpython_api([PyObject], PyObject, result_is_ll=True)
def PyObject_SelfIter(space, ref):
"""Undocumented function, this is what CPython does."""
Py_IncRef(space, ref)
diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py
--- a/pypy/module/cpyext/pystate.py
+++ b/pypy/module/cpyext/pystate.py
@@ -168,8 +168,16 @@
state = space.fromcache(InterpreterState)
return state.get_thread_state(space)
- at cpython_api([], PyObject, error=CANNOT_FAIL)
+ at cpython_api([], PyObject, result_is_ll=True, error=CANNOT_FAIL)
def PyThreadState_GetDict(space):
+ """Return a dictionary in which extensions can store thread-specific state
+ information. Each extension should use a unique key to use to store state in
+ the dictionary. It is okay to call this function when no current thread state
+ is available. If this function returns NULL, no exception has been raised and
+ the caller should assume no current thread state is available.
+
+ Previously this could only be called when a current thread is active, and NULL
+ meant that an exception was raised."""
state = space.fromcache(InterpreterState)
return state.get_thread_state(space).c_dict
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py
--- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py
@@ -1156,19 +1156,6 @@
PyInterpreterState_Clear()."""
raise NotImplementedError
- at cpython_api([], PyObject)
-def PyThreadState_GetDict(space):
- """Return a dictionary in which extensions can store thread-specific state
- information. Each extension should use a unique key to use to store state in
- the dictionary. It is okay to call this function when no current thread state
- is available. If this function returns NULL, no exception has been raised and
- the caller should assume no current thread state is available.
-
- Previously this could only be called when a current thread is active, and NULL
- meant that an exception was raised."""
- borrow_from()
- raise NotImplementedError
-
@cpython_api([lltype.Signed, PyObject], rffi.INT_real, error=CANNOT_FAIL)
def PyThreadState_SetAsyncExc(space, id, exc):
"""Asynchronously raise an exception in a thread. The id argument is the thread
diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
--- a/pypy/module/cpyext/test/test_pyerrors.py
+++ b/pypy/module/cpyext/test/test_pyerrors.py
@@ -365,6 +365,8 @@
assert "in test_PyErr_Display\n" in output
assert "ZeroDivisionError" in output
+ @pytest.mark.skipif(True, reason=
+ "XXX seems to pass, but doesn't: 'py.test -s' shows errors in PyObject_Free")
def test_GetSetExcInfo(self):
import sys
if self.runappdirect and (sys.version_info.major < 3 or
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -127,7 +127,7 @@
#_______________________________________________________________________
- at cpython_api([Py_ssize_t], PyObject)
+ at cpython_api([Py_ssize_t], PyObject, result_is_ll=True)
def PyTuple_New(space, size):
return rffi.cast(PyObject, new_empty_tuple(space, size))
@@ -150,7 +150,8 @@
decref(space, old_ref)
return 0
- at cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True)
+ at cpython_api([PyObject, Py_ssize_t], PyObject,
+ result_borrowed=True, result_is_ll=True)
def PyTuple_GetItem(space, ref, index):
if not tuple_check_ref(space, ref):
PyErr_BadInternalCall(space)
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -752,7 +752,7 @@
w_type2 = from_ref(space, rffi.cast(PyObject, b))
return int(abstract_issubclass_w(space, w_type1, w_type2)) #XXX correct?
- at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def PyType_GenericAlloc(space, type, nitems):
from pypy.module.cpyext.object import _PyObject_NewVar
return _PyObject_NewVar(space, type, nitems)
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -328,7 +328,7 @@
return unicodeobject.encode_object(space, w_unicode, 'unicode-escape', 'strict')
- at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyUnicode_FromUnicode(space, wchar_p, length):
"""Create a Unicode Object from the Py_UNICODE buffer u of the given size. u
may be NULL which causes the contents to be undefined. It is the user's
@@ -342,14 +342,14 @@
else:
return rffi.cast(PyObject, new_empty_unicode(space, length))
- at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyUnicode_FromWideChar(space, wchar_p, length):
"""Create a Unicode object from the wchar_t buffer w of the given size.
Return NULL on failure."""
# PyPy supposes Py_UNICODE == wchar_t
return PyUnicode_FromUnicode(space, wchar_p, length)
- at cpython_api([PyObject, CONST_STRING], PyObject)
+ at cpython_api([PyObject, CONST_STRING], PyObject, result_is_ll=True)
def _PyUnicode_AsDefaultEncodedString(space, ref, errors):
# Returns a borrowed reference.
py_uni = rffi.cast(PyUnicodeObject, ref)
@@ -430,7 +430,7 @@
w_str = space.wrap(rffi.charp2str(s))
return space.call_method(w_str, 'decode', space.wrap("utf-8"))
- at cpython_api([CONST_STRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_STRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyUnicode_FromStringAndSize(space, s, size):
"""Create a Unicode Object from the char buffer u. The bytes will be
interpreted as being UTF-8 encoded. u may also be NULL which causes the
From pypy.commits at gmail.com Sun May 1 02:42:41 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 30 Apr 2016 23:42:41 -0700 (PDT)
Subject: [pypy-commit] pypy default: Document branch
Message-ID: <5725a561.d72d1c0a.a82ff.ffffcfbb@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84086:e4719d2c29a7
Date: 2016-05-01 08:42 +0200
http://bitbucket.org/pypy/pypy/changeset/e4719d2c29a7/
Log: Document branch
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -45,7 +45,12 @@
- improve tracking of PyObject to rpython object mapping
- support tp_as_{number, sequence, mapping, buffer} slots
+(makes the pypy-c bigger; this was fixed subsequently by the
+share-cpyext-cpython-api branch)
+
.. branch: share-mapdict-methods-2
Reduce generated code for subclasses by using the same function objects in all
generated subclasses.
+
+.. branch: share-cpyext-cpython-api
From pypy.commits at gmail.com Sun May 1 03:05:36 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Sun, 01 May 2016 00:05:36 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge branch cpyext-test-A. This gets
all cpyext tests passing under -A.
Message-ID: <5725aac0.161b1c0a.d0e1f.ffffd68f@mx.google.com>
Author: Devin Jeanpierre
Branch:
Changeset: r84087:ae51d22a2c25
Date: 2016-05-01 00:04 -0700
http://bitbucket.org/pypy/pypy/changeset/ae51d22a2c25/
Log: merge branch cpyext-test-A. This gets all cpyext tests passing under
-A.
All the fixes are either by changing behavior to do the CPython-
compatible thing, "#ifdef PYPY_VERSION" to have PyPy-specific test
behavior, or else a @pytest.mark.skipif for tests that shouldn't be
run in CPython at all.
diff --git a/pypy/module/cpyext/test/test_datetime.py b/pypy/module/cpyext/test/test_datetime.py
--- a/pypy/module/cpyext/test/test_datetime.py
+++ b/pypy/module/cpyext/test/test_datetime.py
@@ -109,7 +109,7 @@
Py_RETURN_NONE;
"""
)
- ])
+ ], prologue='#include "datetime.h"\n')
import datetime
assert module.get_types() == (datetime.date,
datetime.datetime,
diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py
--- a/pypy/module/cpyext/test/test_dictobject.py
+++ b/pypy/module/cpyext/test/test_dictobject.py
@@ -181,6 +181,7 @@
if (!PyArg_ParseTuple(args, "O", &dict))
return NULL;
proxydict = PyDictProxy_New(dict);
+#ifdef PYPY_VERSION // PyDictProxy_Check[Exact] are PyPy-specific.
if (!PyDictProxy_Check(proxydict)) {
Py_DECREF(proxydict);
PyErr_SetNone(PyExc_ValueError);
@@ -191,6 +192,7 @@
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
+#endif // PYPY_VERSION
i = PyObject_Size(proxydict);
Py_DECREF(proxydict);
return PyLong_FromLong(i);
diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py
--- a/pypy/module/cpyext/test/test_thread.py
+++ b/pypy/module/cpyext/test/test_thread.py
@@ -1,9 +1,12 @@
-import py
+import sys
+
+import py, pytest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
class AppTestThread(AppTestCpythonExtensionBase):
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_get_thread_ident(self):
module = self.import_extension('foo', [
("get_thread_ident", "METH_NOARGS",
@@ -30,6 +33,7 @@
assert results[0][0] != results[1][0]
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_acquire_lock(self):
module = self.import_extension('foo', [
("test_acquire_lock", "METH_NOARGS",
@@ -53,13 +57,14 @@
])
module.test_acquire_lock()
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_release_lock(self):
module = self.import_extension('foo', [
("test_release_lock", "METH_NOARGS",
"""
#ifndef PyThread_release_lock
#error "seems we are not accessing PyPy's functions"
-#endif
+#endif
PyThread_type_lock lock = PyThread_allocate_lock();
PyThread_acquire_lock(lock, 1);
PyThread_release_lock(lock);
@@ -74,6 +79,7 @@
])
module.test_release_lock()
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_tls(self):
module = self.import_extension('foo', [
("create_key", "METH_NOARGS",
diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py
--- a/pypy/module/cpyext/test/test_tupleobject.py
+++ b/pypy/module/cpyext/test/test_tupleobject.py
@@ -84,7 +84,14 @@
"""
PyObject *item = PyTuple_New(0);
PyObject *t = PyTuple_New(1);
- if (t->ob_refcnt != 1 || item->ob_refcnt != 1) {
+#ifdef PYPY_VERSION
+ // PyPy starts even empty tuples with a refcount of 1.
+ const int initial_item_refcount = 1;
+#else
+ // CPython can cache ().
+ const int initial_item_refcount = item->ob_refcnt;
+#endif // PYPY_VERSION
+ if (t->ob_refcnt != 1 || item->ob_refcnt != initial_item_refcount) {
PyErr_SetString(PyExc_SystemError, "bad initial refcnt");
return NULL;
}
@@ -94,8 +101,8 @@
PyErr_SetString(PyExc_SystemError, "SetItem: t refcnt != 1");
return NULL;
}
- if (item->ob_refcnt != 1) {
- PyErr_SetString(PyExc_SystemError, "SetItem: item refcnt != 1");
+ if (item->ob_refcnt != initial_item_refcount) {
+ PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != initial_item_refcount");
return NULL;
}
@@ -109,8 +116,8 @@
PyErr_SetString(PyExc_SystemError, "GetItem: t refcnt != 1");
return NULL;
}
- if (item->ob_refcnt != 1) {
- PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != 1");
+ if (item->ob_refcnt != initial_item_refcount) {
+ PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != initial_item_refcount");
return NULL;
}
return t;
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -24,8 +24,11 @@
if(PyUnicode_GetSize(s) != 11) {
result = -PyUnicode_GetSize(s);
}
+#ifdef PYPY_VERSION
+ // Slightly silly test that tp_basicsize is reasonable.
if(s->ob_type->tp_basicsize != sizeof(void*)*7)
result = s->ob_type->tp_basicsize;
+#endif // PYPY_VERSION
Py_DECREF(s);
return PyLong_FromLong(result);
"""),
@@ -85,8 +88,11 @@
'''
),
])
- res = module.test_hash(u"xyz")
- assert res == hash(u'xyz')
+ obj = u'xyz'
+ # CPython in particular does not precompute ->hash, so we need to call
+ # hash() first.
+ expected_hash = hash(obj)
+ assert module.test_hash(obj) == expected_hash
def test_default_encoded_string(self):
module = self.import_extension('foo', [
diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py
--- a/pypy/module/cpyext/test/test_version.py
+++ b/pypy/module/cpyext/test/test_version.py
@@ -1,4 +1,6 @@
-import py
+import sys
+
+import py, pytest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
@@ -22,8 +24,6 @@
PyModule_AddIntConstant(m, "py_major_version", PY_MAJOR_VERSION);
PyModule_AddIntConstant(m, "py_minor_version", PY_MINOR_VERSION);
PyModule_AddIntConstant(m, "py_micro_version", PY_MICRO_VERSION);
- PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION);
- PyModule_AddIntConstant(m, "pypy_version_num", PYPY_VERSION_NUM);
}
"""
module = self.import_module(name='foo', init=init)
@@ -31,6 +31,18 @@
assert module.py_major_version == sys.version_info.major
assert module.py_minor_version == sys.version_info.minor
assert module.py_micro_version == sys.version_info.micro
+
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
+ def test_pypy_versions(self):
+ import sys
+ init = """
+ if (Py_IsInitialized()) {
+ PyObject *m = Py_InitModule("foo", NULL);
+ PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION);
+ PyModule_AddIntConstant(m, "pypy_version_num", PYPY_VERSION_NUM);
+ }
+ """
+ module = self.import_module(name='foo', init=init)
v = sys.pypy_version_info
s = '%d.%d.%d' % (v[0], v[1], v[2])
if v.releaselevel != 'final':
From pypy.commits at gmail.com Sun May 1 05:10:51 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 02:10:51 -0700 (PDT)
Subject: [pypy-commit] pypy default: Comment about the __del__ logic in
typedef.py, and point to issue #2287.
Message-ID: <5725c81b.2a18c20a.a67b2.ffffeb3c@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84088:a3c5885e4925
Date: 2016-05-01 11:10 +0200
http://bitbucket.org/pypy/pypy/changeset/a3c5885e4925/
Log: Comment about the __del__ logic in typedef.py, and point to issue
#2287.
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -159,6 +159,18 @@
copy_methods.append(MapdictWeakrefSupport)
name += "Weakrefable"
if wants_del:
+ # This subclass comes with an app-level __del__. To handle
+ # it, we make an RPython-level __del__ method. This
+ # RPython-level method is called directly by the GC and it
+ # cannot do random things (calling the app-level __del__ would
+ # be "random things"). So instead, we just call here
+ # enqueue_for_destruction(), and the app-level __del__ will be
+ # called later at a safe point (typically between bytecodes).
+ # If there is also an inherited RPython-level __del__, it is
+ # called afterwards---not immediately! This base
+ # RPython-level __del__ is supposed to run only when the
+ # object is not reachable any more. NOTE: it doesn't fully
+ # work: see issue #2287.
name += "Del"
parent_destructor = getattr(cls, '__del__', None)
def call_parent_del(self):
From pypy.commits at gmail.com Sun May 1 05:16:35 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 02:16:35 -0700 (PDT)
Subject: [pypy-commit] pypy default: Backed out changeset 1cb2c3897dbb
Message-ID: <5725c973.821b1c0a.1195c.3e0e@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84089:2d9f54097bd9
Date: 2016-05-01 11:15 +0200
http://bitbucket.org/pypy/pypy/changeset/2d9f54097bd9/
Log: Backed out changeset 1cb2c3897dbb
It makes a single RPython subclass instead of two if the base
RPython class has already got a __del__. But this base __del__ might
be lightweight; then the RPython subclass will always have a
heavyweight finalizer...
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -383,25 +383,6 @@
assert not hasattr(b, "storage")
assert hasattr(c, "storage")
- def test_del(self):
- space = self.space
- a, b, c, d = space.unpackiterable(space.appexec([], """():
- class A(object):
- pass
- class B(object):
- def __del__(self):
- pass
- class F(file):
- pass
- class G(file):
- def __del__(self):
- pass
- return A(), B(), F("xyz", "w"), G("ghi", "w")
- """))
- assert type(b).__base__ is type(a)
- assert hasattr(c, "__del__")
- assert type(d) is type(c)
-
class AppTestTypeDef:
def setup_class(cls):
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -113,18 +113,11 @@
return _subclass_cache[key]
except KeyError:
# XXX can save a class if cls already has a __del__
- keys = [key]
- base_has_del = hasattr(cls, '__del__')
- if base_has_del:
- # if the base has a __del__, we only need one class
- keys = [(space, cls, True), (space, cls, False)]
- needsdel = True
- elif needsdel:
+ if needsdel:
cls = get_unique_interplevel_subclass(space, cls, False)
subcls = _getusercls(space, cls, needsdel)
assert key not in _subclass_cache
- for key in keys:
- _subclass_cache[key] = subcls
+ _subclass_cache[key] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
_subclass_cache = {}
@@ -140,24 +133,20 @@
name = cls.__name__ + "User"
mixins_needed = []
- copy_methods = []
- mixins_needed = []
- name = cls.__name__
- if not cls.user_overridden_class:
- if cls is W_ObjectObject or cls is W_InstanceObject:
- mixins_needed.append(_make_storage_mixin_size_n())
- else:
- mixins_needed.append(MapdictStorageMixin)
- copy_methods = [BaseUserClassMapdict]
- if reallywantdict or not typedef.hasdict:
- # the type has no dict, mapdict to provide the dict
- copy_methods.append(MapdictDictSupport)
- name += "Dict"
- if not typedef.weakrefable:
- # the type does not support weakrefs yet, mapdict to provide weakref
- # support
- copy_methods.append(MapdictWeakrefSupport)
- name += "Weakrefable"
+ if cls is W_ObjectObject or cls is W_InstanceObject:
+ mixins_needed.append(_make_storage_mixin_size_n())
+ else:
+ mixins_needed.append(MapdictStorageMixin)
+ copy_methods = [BaseUserClassMapdict]
+ if reallywantdict or not typedef.hasdict:
+ # the type has no dict, mapdict to provide the dict
+ copy_methods.append(MapdictDictSupport)
+ name += "Dict"
+ if not typedef.weakrefable:
+ # the type does not support weakrefs yet, mapdict to provide weakref
+ # support
+ copy_methods.append(MapdictWeakrefSupport)
+ name += "Weakrefable"
if wants_del:
# This subclass comes with an app-level __del__. To handle
# it, we make an RPython-level __del__ method. This
From pypy.commits at gmail.com Sun May 1 05:35:48 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 02:35:48 -0700 (PDT)
Subject: [pypy-commit] pypy default: Silence this debug print
Message-ID: <5725cdf4.0e711c0a.e8ef6.088e@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84090:cea64c2b66ee
Date: 2016-05-01 11:32 +0200
http://bitbucket.org/pypy/pypy/changeset/cea64c2b66ee/
Log: Silence this debug print
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -749,7 +749,7 @@
wrapper_gen = cache.wrapper_gens[signature] = WrapperGen(space,
signature)
cache.stats[0] += 1
- print 'Wrapper cache [wrappers/total]:', cache.stats
+ #print 'Wrapper cache [wrappers/total]:', cache.stats
return wrapper_gen.make_wrapper(callable)
From pypy.commits at gmail.com Sun May 1 05:35:49 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 02:35:49 -0700 (PDT)
Subject: [pypy-commit] pypy default: Fix PyString_Concat and
PyString_ConcatAndDel to do the right thing
Message-ID: <5725cdf5.d81a1c0a.5dcde.040f@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84091:5e6d2531c7c9
Date: 2016-05-01 11:35 +0200
http://bitbucket.org/pypy/pypy/changeset/5e6d2531c7c9/
Log: Fix PyString_Concat and PyString_ConcatAndDel to do the right thing
with reference counts (I think)
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -6,7 +6,7 @@
from pypy.module.cpyext.pyerrors import PyErr_BadArgument
from pypy.module.cpyext.pyobject import (
PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference,
- make_typedescr, get_typedescr, as_pyobj, Py_IncRef)
+ make_typedescr, get_typedescr, as_pyobj, Py_IncRef, get_w_obj_and_decref)
##
## Implementation of PyStringObject
@@ -233,7 +233,7 @@
def _PyString_Eq(space, w_str1, w_str2):
return space.eq_w(w_str1, w_str2)
- at cpython_api([PyObjectP, PyObject], lltype.Void)
+ at cpython_api([PyObjectP, PyObject], lltype.Void, error=None)
def PyString_Concat(space, ref, w_newpart):
"""Create a new string object in *string containing the contents of newpart
appended to string; the caller will own the new reference. The reference to
@@ -241,26 +241,27 @@
the old reference to string will still be discarded and the value of
*string will be set to NULL; the appropriate exception will be set."""
- if not ref[0]:
+ old = ref[0]
+ if not old:
return
- if w_newpart is None or not PyString_Check(space, ref[0]) or not \
- (space.isinstance_w(w_newpart, space.w_str) or
- space.isinstance_w(w_newpart, space.w_unicode)):
- Py_DecRef(space, ref[0])
- ref[0] = lltype.nullptr(PyObject.TO)
- return
- w_str = from_ref(space, ref[0])
- w_newstr = space.add(w_str, w_newpart)
- ref[0] = make_ref(space, w_newstr)
- Py_IncRef(space, ref[0])
+ ref[0] = lltype.nullptr(PyObject.TO)
+ w_str = get_w_obj_and_decref(space, old)
+ if w_newpart is not None and PyString_Check(space, old):
+ # xxx if w_newpart is not a string or unicode or bytearray,
+ # this might call __radd__() on it, whereas CPython raises
+ # a TypeError in this case.
+ w_newstr = space.add(w_str, w_newpart)
+ ref[0] = make_ref(space, w_newstr)
- at cpython_api([PyObjectP, PyObject], lltype.Void)
+ at cpython_api([PyObjectP, PyObject], lltype.Void, error=None)
def PyString_ConcatAndDel(space, ref, newpart):
"""Create a new string object in *string containing the contents of newpart
appended to string. This version decrements the reference count of newpart."""
- PyString_Concat(space, ref, newpart)
- Py_DecRef(space, newpart)
+ try:
+ PyString_Concat(space, ref, newpart)
+ finally:
+ Py_DecRef(space, newpart)
@cpython_api([PyObject, PyObject], PyObject)
def PyString_Format(space, w_format, w_args):
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -359,6 +359,7 @@
assert space.str_w(from_ref(space, ptr[0])) == 'abcdef'
api.PyString_Concat(ptr, space.w_None)
assert not ptr[0]
+ api.PyErr_Clear()
ptr[0] = lltype.nullptr(PyObject.TO)
api.PyString_Concat(ptr, space.wrap('def')) # should not crash
lltype.free(ptr, flavor='raw')
From pypy.commits at gmail.com Sun May 1 05:49:29 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 02:49:29 -0700 (PDT)
Subject: [pypy-commit] pypy default: GIL handling fix: must use
generic_cpy_call() instead of directly
Message-ID: <5725d129.c9b0c20a.e1f64.fffff224@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84092:1a066795ff46
Date: 2016-05-01 11:45 +0200
http://bitbucket.org/pypy/pypy/changeset/1a066795ff46/
Log: GIL handling fix: must use generic_cpy_call() instead of directly
calling some type slots
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -3,7 +3,7 @@
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.bytesobject import new_empty_str, PyStringObject
-from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP
+from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call
from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref
from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr
@@ -339,13 +339,16 @@
c_buf = py_str.c_ob_type.c_tp_as_buffer
assert c_buf
py_obj = rffi.cast(PyObject, py_str)
- assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1
+ assert generic_cpy_call(space, c_buf.c_bf_getsegcount,
+ py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1
ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw')
- assert c_buf.c_bf_getsegcount(py_obj, ref) == 1
+ assert generic_cpy_call(space, c_buf.c_bf_getsegcount,
+ py_obj, ref) == 1
assert ref[0] == 10
lltype.free(ref, flavor='raw')
ref = lltype.malloc(rffi.VOIDPP.TO, 1, flavor='raw')
- assert c_buf.c_bf_getreadbuffer(py_obj, 0, ref) == 10
+ assert generic_cpy_call(space, c_buf.c_bf_getreadbuffer,
+ py_obj, 0, ref) == 10
lltype.free(ref, flavor='raw')
Py_DecRef(space, py_obj)
From pypy.commits at gmail.com Sun May 1 05:49:31 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 02:49:31 -0700 (PDT)
Subject: [pypy-commit] pypy default: Fix another test using
PyString_Concat() in a way that is now
Message-ID: <5725d12b.4412c30a.fec01.ffffe61d@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84093:f05e9998737c
Date: 2016-05-01 11:49 +0200
http://bitbucket.org/pypy/pypy/changeset/f05e9998737c/
Log: Fix another test using PyString_Concat() in a way that is now
crashing---and wrong according to the CPython documentation
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -145,6 +145,7 @@
"""
PyObject ** v;
PyObject * left = PyTuple_GetItem(args, 0);
+ Py_INCREF(left); /* the reference will be stolen! */
v = &left;
PyString_Concat(v, PyTuple_GetItem(args, 1));
return *v;
From pypy.commits at gmail.com Sun May 1 06:09:55 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 03:09:55 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-auto-gil: When some PyXxx() function is
called without the GIL, we already detect
Message-ID: <5725d5f3.8bd31c0a.50429.1781@mx.google.com>
Author: Armin Rigo
Branch: cpyext-auto-gil
Changeset: r84094:bfd2cd24cee2
Date: 2016-05-01 12:09 +0200
http://bitbucket.org/pypy/pypy/changeset/bfd2cd24cee2/
Log: When some PyXxx() function is called without the GIL, we already
detect this case. On "default" we then complain loudly. Maybe we
should instead silently acquire/release the GIL. This would allow
this case to work: CPython C extension modules might call some
"simple" CPython PyXxx() functions without the GIL and hope that
their implementation is kept simple enough.
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -790,6 +790,8 @@
from rpython.rlib import rgil
argtypes_enum_ui = unrolling_iterable(enumerate(argtypesw))
fatal_value = restype._defl()
+ gil_auto_workaround = (gil is None) # automatically detect when we don't
+ # have the GIL, and acquire/release it
gil_acquire = (gil == "acquire" or gil == "around")
gil_release = (gil == "release" or gil == "around")
pygilstate_ensure = (gil == "pygilstate_ensure")
@@ -825,7 +827,8 @@
# see "Handling of the GIL" above (careful, we don't have the GIL here)
tid = rthread.get_or_make_ident()
- if gil_acquire:
+ _gil_auto = (gil_auto_workaround and cpyext_glob_tid_ptr[0] != tid)
+ if gil_acquire or _gil_auto:
if cpyext_glob_tid_ptr[0] == tid:
deadlock_error(nameof(callable))
rgil.acquire()
@@ -919,7 +922,7 @@
arg = rffi.cast(lltype.Signed, args[-1])
unlock = (arg == pystate.PyGILState_UNLOCKED)
else:
- unlock = gil_release
+ unlock = gil_release or _gil_auto
if unlock:
rgil.release()
else:
From pypy.commits at gmail.com Sun May 1 07:30:10 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Sun, 01 May 2016 04:30:10 -0700 (PDT)
Subject: [pypy-commit] pypy default: Use "must be unicode,
not %T" in unicodedata TypeErrors.
Message-ID: <5725e8c2.4849c20a.20f57.145e@mx.google.com>
Author: Devin Jeanpierre
Branch:
Changeset: r84095:c5edfa7c9d6e
Date: 2016-05-01 04:29 -0700
http://bitbucket.org/pypy/pypy/changeset/c5edfa7c9d6e/
Log: Use "must be unicode, not %T" in unicodedata TypeErrors.
diff --git a/pypy/module/unicodedata/interp_ucd.py b/pypy/module/unicodedata/interp_ucd.py
--- a/pypy/module/unicodedata/interp_ucd.py
+++ b/pypy/module/unicodedata/interp_ucd.py
@@ -4,7 +4,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
from rpython.rlib.rarithmetic import r_longlong
from rpython.rlib.objectmodel import we_are_translated
@@ -34,8 +34,9 @@
# Target is wide build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode == 0xFFFF:
# Host CPython is narrow build, accept surrogates
@@ -54,8 +55,9 @@
# Target is narrow build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode > 0xFFFF:
# Host CPython is wide build, forbid surrogates
@@ -179,7 +181,9 @@
@unwrap_spec(form=str)
def normalize(self, space, form, w_unistr):
if not space.isinstance_w(w_unistr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap('argument 2 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 2 must be unicode, not %T',
+ w_unistr)
if form == 'NFC':
composed = True
decomposition = self._canon_decomposition
diff --git a/pypy/module/unicodedata/test/test_unicodedata.py b/pypy/module/unicodedata/test/test_unicodedata.py
--- a/pypy/module/unicodedata/test/test_unicodedata.py
+++ b/pypy/module/unicodedata/test/test_unicodedata.py
@@ -78,10 +78,15 @@
import unicodedata
assert unicodedata.lookup("GOTHIC LETTER FAIHU") == u'\U00010346'
- def test_normalize(self):
+ def test_normalize_bad_argcount(self):
import unicodedata
raises(TypeError, unicodedata.normalize, 'x')
+ def test_normalize_nonunicode(self):
+ import unicodedata
+ exc_info = raises(TypeError, unicodedata.normalize, 'NFC', 'x')
+ assert str(exc_info.value).endswith('must be unicode, not str')
+
@py.test.mark.skipif("sys.maxunicode < 0x10ffff")
def test_normalize_wide(self):
import unicodedata
@@ -103,6 +108,12 @@
# For no reason, unicodedata.mirrored() returns an int, not a bool
assert repr(unicodedata.mirrored(u' ')) == '0'
- def test_bidirectional(self):
+ def test_bidirectional_not_one_character(self):
import unicodedata
- raises(TypeError, unicodedata.bidirectional, u'xx')
+ exc_info = raises(TypeError, unicodedata.bidirectional, u'xx')
+ assert str(exc_info.value) == 'need a single Unicode character as parameter'
+
+ def test_bidirectional_not_one_character(self):
+ import unicodedata
+ exc_info = raises(TypeError, unicodedata.bidirectional, 'x')
+ assert str(exc_info.value).endswith('must be unicode, not str')
From pypy.commits at gmail.com Sun May 1 09:38:22 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 06:38:22 -0700 (PDT)
Subject: [pypy-commit] pypy default: Remove these two lines,
which simply causes an exception to be printed
Message-ID: <572606ce.08a81c0a.e51f3.5b99@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84096:d789f9d98fc2
Date: 2016-05-01 13:53 +0200
http://bitbucket.org/pypy/pypy/changeset/d789f9d98fc2/
Log: Remove these two lines, which simply cause an exception to be
printed to stderr and otherwise ignored (both with and without -A)
diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py
--- a/pypy/module/_io/test/test_bufferedio.py
+++ b/pypy/module/_io/test/test_bufferedio.py
@@ -307,7 +307,6 @@
class MyIO(_io.BufferedWriter):
def __del__(self):
record.append(1)
- super(MyIO, self).__del__()
def close(self):
record.append(2)
super(MyIO, self).close()
diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py
--- a/pypy/module/_io/test/test_io.py
+++ b/pypy/module/_io/test/test_io.py
@@ -88,7 +88,6 @@
class MyIO(io.IOBase):
def __del__(self):
record.append(1)
- super(MyIO, self).__del__()
def close(self):
record.append(2)
super(MyIO, self).close()
From pypy.commits at gmail.com Sun May 1 09:38:26 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 06:38:26 -0700 (PDT)
Subject: [pypy-commit] pypy default: hg merge cpyext-auto-gil
Message-ID: <572606d2.d2aa1c0a.1ecec.ffffe4c0@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84098:22204fd13f81
Date: 2016-05-01 15:35 +0200
http://bitbucket.org/pypy/pypy/changeset/22204fd13f81/
Log: hg merge cpyext-auto-gil
When some PyXxx() function is called without the GIL, we already
detect this case. Previously we would complain loudly. With this
change, we instead silently acquire/release the GIL. This seems to
make numpy happy: it contains calls to some "simple" PyXxx()
functions without the GIL, hoping that their implementation is kept
simple enough, and expect no problem from that.
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -790,6 +790,8 @@
from rpython.rlib import rgil
argtypes_enum_ui = unrolling_iterable(enumerate(argtypesw))
fatal_value = restype._defl()
+ gil_auto_workaround = (gil is None) # automatically detect when we don't
+ # have the GIL, and acquire/release it
gil_acquire = (gil == "acquire" or gil == "around")
gil_release = (gil == "release" or gil == "around")
pygilstate_ensure = (gil == "pygilstate_ensure")
@@ -825,7 +827,8 @@
# see "Handling of the GIL" above (careful, we don't have the GIL here)
tid = rthread.get_or_make_ident()
- if gil_acquire:
+ _gil_auto = (gil_auto_workaround and cpyext_glob_tid_ptr[0] != tid)
+ if gil_acquire or _gil_auto:
if cpyext_glob_tid_ptr[0] == tid:
deadlock_error(nameof(callable))
rgil.acquire()
@@ -919,7 +922,7 @@
arg = rffi.cast(lltype.Signed, args[-1])
unlock = (arg == pystate.PyGILState_UNLOCKED)
else:
- unlock = gil_release
+ unlock = gil_release or _gil_auto
if unlock:
rgil.release()
else:
From pypy.commits at gmail.com Sun May 1 09:38:28 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 06:38:28 -0700 (PDT)
Subject: [pypy-commit] pypy default: document branch
Message-ID: <572606d4.10691c0a.b3a6a.573e@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84099:c9fd80001f59
Date: 2016-05-01 15:37 +0200
http://bitbucket.org/pypy/pypy/changeset/c9fd80001f59/
Log: document branch
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -54,3 +54,10 @@
generated subclasses.
.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL. Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
From pypy.commits at gmail.com Sun May 1 09:38:29 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 06:38:29 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge heads
Message-ID: <572606d5.2472c20a.cee8.0b3b@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84100:bf4e328270ce
Date: 2016-05-01 15:38 +0200
http://bitbucket.org/pypy/pypy/changeset/bf4e328270ce/
Log: merge heads
diff --git a/pypy/module/unicodedata/interp_ucd.py b/pypy/module/unicodedata/interp_ucd.py
--- a/pypy/module/unicodedata/interp_ucd.py
+++ b/pypy/module/unicodedata/interp_ucd.py
@@ -4,7 +4,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
from rpython.rlib.rarithmetic import r_longlong
from rpython.rlib.objectmodel import we_are_translated
@@ -34,8 +34,9 @@
# Target is wide build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode == 0xFFFF:
# Host CPython is narrow build, accept surrogates
@@ -54,8 +55,9 @@
# Target is narrow build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode > 0xFFFF:
# Host CPython is wide build, forbid surrogates
@@ -179,7 +181,9 @@
@unwrap_spec(form=str)
def normalize(self, space, form, w_unistr):
if not space.isinstance_w(w_unistr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap('argument 2 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 2 must be unicode, not %T',
+ w_unistr)
if form == 'NFC':
composed = True
decomposition = self._canon_decomposition
diff --git a/pypy/module/unicodedata/test/test_unicodedata.py b/pypy/module/unicodedata/test/test_unicodedata.py
--- a/pypy/module/unicodedata/test/test_unicodedata.py
+++ b/pypy/module/unicodedata/test/test_unicodedata.py
@@ -78,10 +78,15 @@
import unicodedata
assert unicodedata.lookup("GOTHIC LETTER FAIHU") == u'\U00010346'
- def test_normalize(self):
+ def test_normalize_bad_argcount(self):
import unicodedata
raises(TypeError, unicodedata.normalize, 'x')
+ def test_normalize_nonunicode(self):
+ import unicodedata
+ exc_info = raises(TypeError, unicodedata.normalize, 'NFC', 'x')
+ assert str(exc_info.value).endswith('must be unicode, not str')
+
@py.test.mark.skipif("sys.maxunicode < 0x10ffff")
def test_normalize_wide(self):
import unicodedata
@@ -103,6 +108,12 @@
# For no reason, unicodedata.mirrored() returns an int, not a bool
assert repr(unicodedata.mirrored(u' ')) == '0'
- def test_bidirectional(self):
+ def test_bidirectional_not_one_character(self):
import unicodedata
- raises(TypeError, unicodedata.bidirectional, u'xx')
+ exc_info = raises(TypeError, unicodedata.bidirectional, u'xx')
+ assert str(exc_info.value) == 'need a single Unicode character as parameter'
+
+ def test_bidirectional_not_one_character(self):
+ import unicodedata
+ exc_info = raises(TypeError, unicodedata.bidirectional, 'x')
+ assert str(exc_info.value).endswith('must be unicode, not str')
From pypy.commits at gmail.com Sun May 1 09:38:24 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 06:38:24 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-auto-gil: Ready to merge,
numpy tests seem happy
Message-ID: <572606d0.455ec20a.aa6df.3e75@mx.google.com>
Author: Armin Rigo
Branch: cpyext-auto-gil
Changeset: r84097:791761b33df6
Date: 2016-05-01 15:33 +0200
http://bitbucket.org/pypy/pypy/changeset/791761b33df6/
Log: Ready to merge, numpy tests seem happy
From pypy.commits at gmail.com Sun May 1 09:38:54 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 06:38:54 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Make a new attempt similar to the
unmerged 'gc-del' branch, with a
Message-ID: <572606ee.161b1c0a.d0e1f.57a0@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84101:de156559ad08
Date: 2016-05-01 14:04 +0200
http://bitbucket.org/pypy/pypy/changeset/de156559ad08/
Log: Make a new attempt similar to the unmerged 'gc-del' branch, with a
smaller scope: the goal is only to add rgc.register_finalizer()
From pypy.commits at gmail.com Sun May 1 10:03:12 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 07:03:12 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Update docs with the goal
Message-ID: <57260ca0.22acc20a.2b9b.2b6a@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84102:0cebe4cdc049
Date: 2016-05-01 16:03 +0200
http://bitbucket.org/pypy/pypy/changeset/0cebe4cdc049/
Log: Update docs with the goal
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,118 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``. To
+make it possible, the RPython interface is now the following one (from
+May 2016):
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``. These are called
+ immediately by the GC when the last reference to the object goes
+ away, like in CPython. However (like "lightweight finalizers" used
+ to be), all ``__del__()`` methods must only contain simple enough
+ code, and this is checked. We call this "destructors". They can't
+ use operations that would resurrect the object, for example.
+
+* For any more advanced usage --- in particular for any app-level
+ object with a __del__ --- we don't use the RPython-level
+ ``__del__()`` method. Instead we use
+ ``rgc.FinalizerController.register_finalizer()``. This allows us to
+ attach a finalizer method to the object, giving more control over
+ the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer. A finalizer runs earlier, and in topological
+order; care must be taken that the object might still be reachable at
+this point if we're clever enough. A destructor on the other hand runs
+last; nothing can be done with the object any more.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when there is no more reference to an object. Intended for
+objects that just need to free a block of raw memory or close a file.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it. These restrictions are
+checked. In particular you cannot access fields containing GC objects;
+and if you call an external C function, it must be a "safe" function
+(e.g. not releasing the GIL; use ``releasegil=False`` in
+``rffi.llexternal()``).
+
+If there are several objects with destructors that die during the same
+GC cycle, they are called in a completely random order --- but that
+should not matter because destructors cannot do much anyway.
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerController`` class::
+
+* You must give a class-level attribute ``base_class``, which is the
+ base class of all instances with a finalizer. (If you need
+ finalizers on several unrelated classes, you need several unrelated
+ ``FinalizerController`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``. At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer. Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and marks them as
+reachable again, as well as all objects they depend on. It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance. Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes). If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly. It
+returns the next queued item, or ``None`` when the queue is empty.
+
+It is not allowed to cumulate several ``FinalizerController``
+instances for objects of the same class. Calling
+``fin.register_finalizer(obj)`` several times for the same ``obj`` is
+fine (and will only register it once).
+
+
+Ordering of finalizers
+----------------------
+
+After a collection, the MiniMark GC should call the finalizers on
*some* of the objects that have one and that have become unreachable.
Basically, if there is a reference chain from an object a to an object b
then it should not call the finalizer for b immediately, but just keep b
alive and try again to call its finalizer after the next collection.
-This basic idea fails when there are cycles. It's not a good idea to
+(Note that this creates rare but annoying issues as soon as the program
+creates chains of objects with finalizers more quickly than the rate at
+which major collections go (which is very slow). In August 2013 we tried
+instead to call all finalizers of all objects found unreachable at a major
+collection. That branch, ``gc-del``, was never merged. It is still
+unclear what the real consequences would be on programs in the wild.)
+
+The basic idea fails in the presence of cycles. It's not a good idea to
keep the objects alive forever or to never call any of the finalizers.
The model we came up with is that in this case, we could just call the
finalizer of one of the objects in the cycle -- but only, of course, if
@@ -33,6 +132,7 @@
detach the finalizer (so that it's not called more than once)
call the finalizer
+
Algorithm
---------
@@ -136,28 +236,8 @@
that doesn't change the state of an object, we don't follow its children
recursively.
-In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode
-the 4 states with a single extra bit in the header:
-
- ===== ============= ======== ====================
- state is_forwarded? bit set? bit set in the copy?
- ===== ============= ======== ====================
- 0 no no n/a
- 1 no yes n/a
- 2 yes yes yes
- 3 yes whatever no
- ===== ============= ======== ====================
-
-So the loop above that does the transition from state 1 to state 2 is
-really just a copy(x) followed by scan_copied(). We must also clear the
-bit in the copy at the end, to clean up before the next collection
-(which means recursively bumping the state from 2 to 3 in the final
-loop).
-
-In the MiniMark GC, the objects don't move (apart from when they are
-copied out of the nursery), but we use the flag GCFLAG_VISITED to mark
-objects that survive, so we can also have a single extra bit for
-finalizers:
+In practice, in the MiniMark GCs, we can encode
+the 4 states with a combination of two bits in the header:
===== ============== ============================
state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING
@@ -167,3 +247,8 @@
2 yes yes
3 yes no
===== ============== ============================
+
+So the loop above that does the transition from state 1 to state 2 is
+really just a recursive visit. We must also clear the
+FINALIZATION_ORDERING bit at the end (state 2 to state 3) to clean up
+before the next collection.
diff --git a/rpython/doc/rpython.rst b/rpython/doc/rpython.rst
--- a/rpython/doc/rpython.rst
+++ b/rpython/doc/rpython.rst
@@ -191,6 +191,12 @@
``__setitem__`` for slicing isn't supported. Additionally, using negative
indices for slicing is still not supported, even when using ``__getslice__``.
+ Note that from May 2016 the destructor ``__del__`` must only contain
+ `simple operations`__; for any kind of more complex destructor, see
+ ``rpython.rlib.rgc.register_finalizer()``.
+
+.. __: garbage_collection.html
+
This layout makes the number of types to take care about quite limited.
From pypy.commits at gmail.com Sun May 1 10:21:58 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 07:21:58 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: These are really queues,
similar to Java's queues of objects to
Message-ID: <57261106.81da1c0a.db864.612c@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84103:25ed89f59a32
Date: 2016-05-01 16:22 +0200
http://bitbucket.org/pypy/pypy/changeset/25ed89f59a32/
Log: These are really queues, similar to Java's queues of objects to
finalize.
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -56,12 +56,12 @@
The interface for full finalizers is made with PyPy in mind, but should
be generally useful.
-The idea is that you subclass the ``rgc.FinalizerController`` class::
+The idea is that you subclass the ``rgc.FinalizerQueue`` class::
* You must give a class-level attribute ``base_class``, which is the
base class of all instances with a finalizer. (If you need
finalizers on several unrelated classes, you need several unrelated
- ``FinalizerController`` subclasses.)
+ ``FinalizerQueue`` subclasses.)
* You override the ``finalizer_trigger()`` method; see below.
@@ -90,10 +90,10 @@
To find the queued items, call ``fin.next_dead()`` repeatedly. It
returns the next queued item, or ``None`` when the queue is empty.
-It is not allowed to cumulate several ``FinalizerController``
-instances for objects of the same class. Calling
-``fin.register_finalizer(obj)`` several times for the same ``obj`` is
-fine (and will only register it once).
+It is not allowed to cumulate several ``FinalizerQueue`` instances for
+objects of the same class. Calling ``fin.register_finalizer(obj)``
+several times with the same arguments is fine (and will only register
+``obj`` once).
Ordering of finalizers
From pypy.commits at gmail.com Sun May 1 11:50:24 2016
From: pypy.commits at gmail.com (rlamy)
Date: Sun, 01 May 2016 08:50:24 -0700 (PDT)
Subject: [pypy-commit] pypy py3k-update: Don't run -A tests in cpyext
Message-ID: <572625c0.10691c0a.b3a6a.ffff8108@mx.google.com>
Author: Ronan Lamy
Branch: py3k-update
Changeset: r84104:1c3add02e1c7
Date: 2016-05-01 16:49 +0100
http://bitbucket.org/pypy/pypy/changeset/1c3add02e1c7/
Log: Don't run -A tests in cpyext
diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py
--- a/pypy/module/cpyext/test/conftest.py
+++ b/pypy/module/cpyext/test/conftest.py
@@ -11,6 +11,8 @@
space.getbuiltinmodule("time")
def pytest_ignore_collect(path, config):
+ if config.option.runappdirect:
+ return True # "cannot be run by py.test -A"
# ensure additional functions are registered
import pypy.module.cpyext.test.test_cpyext
return False
From pypy.commits at gmail.com Sun May 1 11:53:08 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 08:53:08 -0700 (PDT)
Subject: [pypy-commit] pypy default: Manually reset sys.settrace() and
sys.setprofile() when we're done running.
Message-ID: <57262664.c486c20a.8ce98.6fe9@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84105:ea6e01b797e0
Date: 2016-05-01 16:59 +0100
http://bitbucket.org/pypy/pypy/changeset/ea6e01b797e0/
Log: Manually reset sys.settrace() and sys.setprofile() when we're done
running. This is not exactly what CPython does, but if we get an
exception, unlike CPython, we call functions from the 'traceback'
module, and these would call more the trace/profile function. That's
unexpected and can lead to more crashes at this point.
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -78,7 +78,11 @@
"""
try:
# run it
- f(*fargs, **fkwds)
+ try:
+ f(*fargs, **fkwds)
+ finally:
+ sys.settrace(None)
+ sys.setprofile(None)
# we arrive here if no exception is raised. stdout cosmetics...
try:
From pypy.commits at gmail.com Sun May 1 12:15:00 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 01 May 2016 09:15:00 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Implement FinalizerQueue as documented
for the emulated on-top-of-cpython mode
Message-ID: <57262b84.878d1c0a.ed012.ffff87ee@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84106:3d4ba6165353
Date: 2016-05-01 18:15 +0200
http://bitbucket.org/pypy/pypy/changeset/3d4ba6165353/
Log: Implement FinalizerQueue as documented for the emulated on-top-of-
cpython mode
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -361,11 +361,106 @@
return func
def must_be_light_finalizer(func):
- func._must_be_light_finalizer_ = True
+ import warnings
+ warnings.warn("@must_be_light_finalizer is implied and has no effect "
+ "any more", DeprecationWarning)
return func
+
+class FinalizerQueue(object):
+ """A finalizer queue. See pypy/doc/discussion/finalizer-order.rst.
+ """
+ # Must be subclassed, and the subclass needs these attributes:
+ #
+ # base_class:
+ # the base class (or only class) of finalized objects
+ #
+ # def finalizer_trigger(self):
+ # called to notify that new items have been put in the queue
+
+ def next_dead(self):
+ "NOT_RPYTHON: special-cased below"
+ try:
+ return self._queue.popleft()
+ except (AttributeError, IndexError):
+ return None
+
+ def register_finalizer(self, obj):
+ "NOT_RPYTHON: special-cased below"
+ assert isinstance(obj, self.base_class)
+
+ if hasattr(obj, '__enable_del_for_id'):
+ return # already called
+
+ if not hasattr(self, '_queue'):
+ import collections
+ self._weakrefs = set()
+ self._queue = collections.deque()
+
+ # Fetch and check the type of 'obj'
+ objtyp = obj.__class__
+ assert isinstance(objtyp, type), (
+ "to run register_finalizer() untranslated, "
+ "the object's class must be new-style")
+ assert hasattr(obj, '__dict__'), (
+ "to run register_finalizer() untranslated, "
+ "the object must have a __dict__")
+ assert not hasattr(obj, '__slots__'), (
+ "to run register_finalizer() untranslated, "
+ "the object must not have __slots__")
+
+ # The first time, patch the method __del__ of the class, if
+ # any, so that we can disable it on the original 'obj' and
+ # enable it only on the 'newobj'
+ _fq_patch_class(objtyp)
+
+ # Build a new shadow object with the same class and dict
+ newobj = object.__new__(objtyp)
+ obj.__dict__ = obj.__dict__.copy() #PyPy: break the dict->obj dependency
+ newobj.__dict__ = obj.__dict__
+
+ # A callback that is invoked when (or after) 'obj' is deleted;
+ # 'newobj' is still kept alive here
+ def callback(wr):
+ self._weakrefs.discard(wr)
+ self._queue.append(newobj)
+ self.finalizer_trigger()
+
+ import weakref
+ wr = weakref.ref(obj, callback)
+ self._weakrefs.add(wr)
+
+ # Disable __del__ on the original 'obj' and enable it only on
+ # the 'newobj'. Use id() and not a regular reference, because
+ # that would make a cycle between 'newobj' and 'obj.__dict__'
+ # (which is 'newobj.__dict__' too).
+ setattr(obj, '__enable_del_for_id', id(newobj))
+
+
+def _fq_patch_class(Cls):
+ if Cls in _fq_patched_classes:
+ return
+ if '__del__' in Cls.__dict__:
+ def __del__(self):
+ if not we_are_translated():
+ try:
+ if getattr(self, '__enable_del_for_id') != id(self):
+ return
+ except AttributeError:
+ pass
+ original_del(self)
+ original_del = Cls.__del__
+ Cls.__del__ = __del__
+ _fq_patched_classes.add(Cls)
+ for BaseCls in Cls.__bases__:
+ _fq_patch_class(BaseCls)
+
+_fq_patched_classes = set()
+
+
# ____________________________________________________________
+
def get_rpy_roots():
"NOT_RPYTHON"
# Return the 'roots' from the GC.
diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py
--- a/rpython/rlib/test/test_rgc.py
+++ b/rpython/rlib/test/test_rgc.py
@@ -252,3 +252,118 @@
t, typer, graph = gengraph(f, [])
assert typer.custom_trace_funcs == [(TP, trace_func)]
+
+
+# ____________________________________________________________
+
+
+class T_Root(object):
+ pass
+
+class T_Int(T_Root):
+ def __init__(self, x):
+ self.x = x
+
+class SimpleFQ(rgc.FinalizerQueue):
+ base_class = T_Root
+ _triggered = 0
+ def finalizer_trigger(self):
+ self._triggered += 1
+
+class TestFinalizerQueue:
+
+ def test_simple(self):
+ fq = SimpleFQ()
+ assert fq.next_dead() is None
+ assert fq._triggered == 0
+ w = T_Int(67)
+ fq.register_finalizer(w)
+ #
+ gc.collect()
+ assert fq._triggered == 0
+ assert fq.next_dead() is None
+ #
+ del w
+ gc.collect()
+ assert fq._triggered == 1
+ n = fq.next_dead()
+ assert type(n) is T_Int and n.x == 67
+ #
+ gc.collect()
+ assert fq._triggered == 1
+ assert fq.next_dead() is None
+
+ def test_del_1(self):
+ deleted = {}
+ class T_Del(T_Int):
+ def __del__(self):
+ deleted[self.x] = deleted.get(self.x, 0) + 1
+
+ fq = SimpleFQ()
+ fq.register_finalizer(T_Del(42))
+ gc.collect(); gc.collect()
+ assert deleted == {}
+ assert fq._triggered == 1
+ n = fq.next_dead()
+ assert type(n) is T_Del and n.x == 42
+ assert deleted == {}
+ del n
+ gc.collect()
+ assert fq.next_dead() is None
+ assert deleted == {42: 1}
+ assert fq._triggered == 1
+
+ def test_del_2(self):
+ deleted = {}
+ class T_Del1(T_Int):
+ def __del__(self):
+ deleted[1, self.x] = deleted.get((1, self.x), 0) + 1
+ class T_Del2(T_Del1):
+ def __del__(self):
+ deleted[2, self.x] = deleted.get((2, self.x), 0) + 1
+ T_Del1.__del__(self)
+
+ fq = SimpleFQ()
+ w = T_Del2(42)
+ fq.register_finalizer(w)
+ fq.register_finalizer(w)
+ fq.register_finalizer(w)
+ del w
+ fq.register_finalizer(T_Del1(21))
+ gc.collect(); gc.collect()
+ assert deleted == {}
+ assert fq._triggered == 2
+ a = fq.next_dead()
+ b = fq.next_dead()
+ if a.x == 21:
+ a, b = b, a
+ assert type(a) is T_Del2 and a.x == 42
+ assert type(b) is T_Del1 and b.x == 21
+ assert deleted == {}
+ del a, b
+ gc.collect()
+ assert fq.next_dead() is None
+ assert deleted == {(1, 42): 1, (2, 42): 1, (1, 21): 1}
+ assert fq._triggered == 2
+
+ def test_del_3(self):
+ deleted = {}
+ class T_Del1(T_Int):
+ def __del__(self):
+ deleted[1, self.x] = deleted.get((1, self.x), 0) + 1
+ class T_Del2(T_Del1):
+ pass
+
+ fq = SimpleFQ()
+ fq.register_finalizer(T_Del2(42))
+ gc.collect(); gc.collect()
+ assert deleted == {}
+ assert fq._triggered == 1
+ a = fq.next_dead()
+ assert type(a) is T_Del2 and a.x == 42
+ assert deleted == {}
+ del a
+ gc.collect()
+ assert fq.next_dead() is None
+ assert deleted == {(1, 42): 1}
+ assert fq._triggered == 1
From pypy.commits at gmail.com Sun May 1 13:11:32 2016
From: pypy.commits at gmail.com (rlamy)
Date: Sun, 01 May 2016 10:11:32 -0700 (PDT)
Subject: [pypy-commit] pypy py3k-update: hg merge default
Message-ID: <572638c4.52ad1c0a.1063e.ffff996a@mx.google.com>
Author: Ronan Lamy
Branch: py3k-update
Changeset: r84107:db30c99ce18e
Date: 2016-05-01 18:10 +0100
http://bitbucket.org/pypy/pypy/changeset/db30c99ce18e/
Log: hg merge default
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -21,3 +21,4 @@
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -117,13 +117,22 @@
On which platforms does PyPy run?
---------------------------------
-PyPy is regularly and extensively tested on Linux machines. It mostly
+PyPy currently supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+PyPy is regularly and extensively tested on Linux machines. It
works on Mac and Windows: it is tested there, but most of us are running
-Linux so fixes may depend on 3rd-party contributions. PyPy's JIT
-works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7).
-Support for POWER (64-bit) is stalled at the moment.
+Linux so fixes may depend on 3rd-party contributions.
-To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or
+To bootstrap from sources, PyPy can use either CPython 2.7 or
another (e.g. older) PyPy. Cross-translation is not really supported:
e.g. to build a 32-bit PyPy, you need to have a 32-bit environment.
Cross-translation is only explicitly supported between a 32-bit Intel
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released a bugfix for PyPy 5.1, due to a regression_ in
+installing third-party packages dependent on numpy (using our numpy fork
+available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy, which increased the memory required to translate. The
+improvement will be noticed by downstream packagers and those who translate
+rather than download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -24,7 +24,11 @@
remove-objspace-options.
.. branch: cpyext-for-merge
-Update cpyext C-API support:
+
+Update cpyext C-API support After this branch, we are almost able to support
+upstream numpy via cpyext, so we created (yet another) fork of numpy at
+github.com/pypy/numpy with the needed changes. Among the significant changes
+to cpyext:
- allow c-snippet tests to be run with -A so we can verify we are compatible
- fix many edge cases exposed by fixing tests to run with -A
- issequence() logic matches cpython
@@ -40,6 +44,20 @@
- rewrite slot assignment for typeobjects
- improve tracking of PyObject to rpython object mapping
- support tp_as_{number, sequence, mapping, buffer} slots
-After this branch, we are almost able to support upstream numpy via cpyext, so
-we created (yet another) fork of numpy at github.com/pypy/numpy with the needed
-changes
+
+(makes the pypy-c bigger; this was fixed subsequently by the
+share-cpyext-cpython-api branch)
+
+.. branch: share-mapdict-methods-2
+
+Reduce generated code for subclasses by using the same function objects in all
+generated subclasses.
+
+.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL. Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -87,7 +87,11 @@
"""
try:
# run it
- f(*fargs, **fkwds)
+ try:
+ f(*fargs, **fkwds)
+ finally:
+ sys.settrace(None)
+ sys.setprofile(None)
except SystemExit as e:
handle_sys_exit(e)
except BaseException as e:
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -364,6 +364,26 @@
""")
assert seen == [1]
+ def test_mapdict_number_of_slots(self):
+ space = self.space
+ a, b, c = space.unpackiterable(space.appexec([], """():
+ class A(object):
+ pass
+ a = A()
+ a.x = 1
+ class B:
+ pass
+ b = B()
+ b.x = 1
+ class C(int):
+ pass
+ c = C(1)
+ c.x = 1
+ return a, b, c
+ """), 3)
+ assert not hasattr(a, "storage")
+ assert not hasattr(b, "storage")
+ assert hasattr(c, "storage")
class AppTestTypeDef:
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -103,44 +103,63 @@
# we need two subclasses of the app-level type, one to add mapdict, and then one
# to add del to not slow down the GC.
-def get_unique_interplevel_subclass(config, cls, needsdel=False):
+def get_unique_interplevel_subclass(space, cls, needsdel=False):
"NOT_RPYTHON: initialization-time only"
if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False):
needsdel = False
assert cls.typedef.acceptable_as_base_class
- key = config, cls, needsdel
+ key = space, cls, needsdel
try:
return _subclass_cache[key]
except KeyError:
# XXX can save a class if cls already has a __del__
if needsdel:
- cls = get_unique_interplevel_subclass(config, cls, False)
- subcls = _getusercls(config, cls, needsdel)
+ cls = get_unique_interplevel_subclass(space, cls, False)
+ subcls = _getusercls(space, cls, needsdel)
assert key not in _subclass_cache
_subclass_cache[key] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
_subclass_cache = {}
-def _getusercls(config, cls, wants_del, reallywantdict=False):
+def _getusercls(space, cls, wants_del, reallywantdict=False):
from rpython.rlib import objectmodel
+ from pypy.objspace.std.objectobject import W_ObjectObject
+ from pypy.module.__builtin__.interp_classobj import W_InstanceObject
from pypy.objspace.std.mapdict import (BaseUserClassMapdict,
MapdictDictSupport, MapdictWeakrefSupport,
- _make_storage_mixin_size_n)
+ _make_storage_mixin_size_n, MapdictStorageMixin)
typedef = cls.typedef
name = cls.__name__ + "User"
- mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()]
+ mixins_needed = []
+ if cls is W_ObjectObject or cls is W_InstanceObject:
+ mixins_needed.append(_make_storage_mixin_size_n())
+ else:
+ mixins_needed.append(MapdictStorageMixin)
+ copy_methods = [BaseUserClassMapdict]
if reallywantdict or not typedef.hasdict:
# the type has no dict, mapdict to provide the dict
- mixins_needed.append(MapdictDictSupport)
+ copy_methods.append(MapdictDictSupport)
name += "Dict"
if not typedef.weakrefable:
# the type does not support weakrefs yet, mapdict to provide weakref
# support
- mixins_needed.append(MapdictWeakrefSupport)
+ copy_methods.append(MapdictWeakrefSupport)
name += "Weakrefable"
if wants_del:
+ # This subclass comes with an app-level __del__. To handle
+ # it, we make an RPython-level __del__ method. This
+ # RPython-level method is called directly by the GC and it
+ # cannot do random things (calling the app-level __del__ would
+ # be "random things"). So instead, we just call here
+ # enqueue_for_destruction(), and the app-level __del__ will be
+ # called later at a safe point (typically between bytecodes).
+ # If there is also an inherited RPython-level __del__, it is
+ # called afterwards---not immediately! This base
+ # RPython-level __del__ is supposed to run only when the
+ # object is not reachable any more. NOTE: it doesn't fully
+ # work: see issue #2287.
name += "Del"
parent_destructor = getattr(cls, '__del__', None)
def call_parent_del(self):
@@ -148,14 +167,14 @@
parent_destructor(self)
def call_applevel_del(self):
assert isinstance(self, subcls)
- self.space.userdel(self)
+ space.userdel(self)
class Proto(object):
def __del__(self):
self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space, call_applevel_del,
+ self.enqueue_for_destruction(space, call_applevel_del,
'method __del__ of ')
if parent_destructor is not None:
- self.enqueue_for_destruction(self.space, call_parent_del,
+ self.enqueue_for_destruction(space, call_parent_del,
'internal destructor of ')
mixins_needed.append(Proto)
@@ -163,10 +182,17 @@
user_overridden_class = True
for base in mixins_needed:
objectmodel.import_from_mixin(base)
+ for copycls in copy_methods:
+ _copy_methods(copycls, subcls)
del subcls.base
subcls.__name__ = name
return subcls
+def _copy_methods(copycls, subcls):
+ for key, value in copycls.__dict__.items():
+ if (not key.startswith('__') or key == '__del__'):
+ setattr(subcls, key, value)
+
# ____________________________________________________________
diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py
--- a/pypy/module/_io/test/test_bufferedio.py
+++ b/pypy/module/_io/test/test_bufferedio.py
@@ -318,7 +318,6 @@
class MyIO(_io.BufferedWriter):
def __del__(self):
record.append(1)
- super(MyIO, self).__del__()
def close(self):
record.append(2)
super(MyIO, self).close()
diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py
--- a/pypy/module/_io/test/test_io.py
+++ b/pypy/module/_io/test/test_io.py
@@ -88,7 +88,6 @@
class MyIO(io.IOBase):
def __del__(self):
record.append(1)
- super(MyIO, self).__del__()
def close(self):
record.append(2)
super(MyIO, self).close()
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -10,6 +10,7 @@
from rpython.rtyper.lltypesystem import ll2ctypes
from rpython.rtyper.annlowlevel import llhelper
from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here
+from rpython.rlib.objectmodel import dont_inline
from rpython.translator import cdir
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.gensupp import NameManager
@@ -255,7 +256,7 @@
class ApiFunction:
def __init__(self, argtypes, restype, callable, error=_NOT_SPECIFIED,
- c_name=None, gil=None, result_borrowed=False):
+ c_name=None, gil=None, result_borrowed=False, result_is_ll=False):
self.argtypes = argtypes
self.restype = restype
self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype))
@@ -276,6 +277,9 @@
assert len(self.argnames) == len(self.argtypes)
self.gil = gil
self.result_borrowed = result_borrowed
+ self.result_is_ll = result_is_ll
+ if result_is_ll: # means 'returns a low-level PyObject pointer'
+ assert is_PyObject(restype)
#
def get_llhelper(space):
return llhelper(self.functype, self.get_wrapper(space))
@@ -300,7 +304,7 @@
DEFAULT_HEADER = 'pypy_decl.h'
def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER,
- gil=None, result_borrowed=False):
+ gil=None, result_borrowed=False, result_is_ll=False):
"""
Declares a function to be exported.
- `argtypes`, `restype` are lltypes and describe the function signature.
@@ -339,7 +343,8 @@
c_name = func_name
api_function = ApiFunction(argtypes, restype, func, error,
c_name=c_name, gil=gil,
- result_borrowed=result_borrowed)
+ result_borrowed=result_borrowed,
+ result_is_ll=result_is_ll)
func.api_func = api_function
if error is _NOT_SPECIFIED:
@@ -614,6 +619,9 @@
def is_PyObject(TYPE):
if not isinstance(TYPE, lltype.Ptr):
return False
+ if TYPE == PyObject:
+ return True
+ assert not isinstance(TYPE.TO, lltype.ForwardReference)
return hasattr(TYPE.TO, 'c_ob_refcnt') and hasattr(TYPE.TO, 'c_ob_type')
# a pointer to PyObject
@@ -670,37 +678,161 @@
pypy_debug_catch_fatal_exception = rffi.llexternal('pypy_debug_catch_fatal_exception', [], lltype.Void)
+
+# ____________________________________________________________
+
+
+class WrapperCache(object):
+ def __init__(self, space):
+ self.space = space
+ self.wrapper_gens = {} # {signature: WrapperGen()}
+ self.stats = [0, 0]
+
+class WrapperGen(object):
+ wrapper_second_level = None
+
+ def __init__(self, space, signature):
+ self.space = space
+ self.signature = signature
+ self.callable2name = []
+
+ def make_wrapper(self, callable):
+ self.callable2name.append((callable, callable.__name__))
+ if self.wrapper_second_level is None:
+ self.wrapper_second_level = make_wrapper_second_level(
+ self.space, self.callable2name, *self.signature)
+ wrapper_second_level = self.wrapper_second_level
+
+ def wrapper(*args):
+ # no GC here, not even any GC object
+ args += (callable,)
+ return wrapper_second_level(*args)
+
+ wrapper.__name__ = "wrapper for %r" % (callable, )
+ return wrapper
+
+
# Make the wrapper for the cases (1) and (2)
def make_wrapper(space, callable, gil=None):
"NOT_RPYTHON"
+ # This logic is obscure, because we try to avoid creating one
+ # big wrapper() function for every callable. Instead we create
+ # only one per "signature".
+
+ argnames = callable.api_func.argnames
+ argtypesw = zip(callable.api_func.argtypes,
+ [_name.startswith("w_") for _name in argnames])
+ error_value = getattr(callable.api_func, "error_value", CANNOT_FAIL)
+ if (isinstance(callable.api_func.restype, lltype.Ptr)
+ and error_value is not CANNOT_FAIL):
+ assert lltype.typeOf(error_value) == callable.api_func.restype
+ assert not error_value # only support error=NULL
+ error_value = 0 # because NULL is not hashable
+
+ if callable.api_func.result_is_ll:
+ result_kind = "L"
+ elif callable.api_func.result_borrowed:
+ result_kind = "B" # note: 'result_borrowed' is ignored if we also
+ else: # say 'result_is_ll=True' (in this case it's
+ result_kind = "." # up to you to handle refcounting anyway)
+
+ signature = (tuple(argtypesw),
+ callable.api_func.restype,
+ result_kind,
+ error_value,
+ gil)
+
+ cache = space.fromcache(WrapperCache)
+ cache.stats[1] += 1
+ try:
+ wrapper_gen = cache.wrapper_gens[signature]
+ except KeyError:
+ print signature
+ wrapper_gen = cache.wrapper_gens[signature] = WrapperGen(space,
+ signature)
+ cache.stats[0] += 1
+ #print 'Wrapper cache [wrappers/total]:', cache.stats
+ return wrapper_gen.make_wrapper(callable)
+
+
+ at dont_inline
+def deadlock_error(funcname):
+ fatalerror_notb("GIL deadlock detected when a CPython C extension "
+ "module calls '%s'" % (funcname,))
+
+ at dont_inline
+def no_gil_error(funcname):
+ fatalerror_notb("GIL not held when a CPython C extension "
+ "module calls '%s'" % (funcname,))
+
+ at dont_inline
+def not_supposed_to_fail(funcname):
+ raise SystemError("The function '%s' was not supposed to fail"
+ % (funcname,))
+
+ at dont_inline
+def unexpected_exception(funcname, e, tb):
+ print 'Fatal error in cpyext, CPython compatibility layer, calling',funcname
+ print 'Either report a bug or consider not using this particular extension'
+ if not we_are_translated():
+ if tb is None:
+ tb = sys.exc_info()[2]
+ import traceback
+ traceback.print_exc()
+ if sys.stdout == sys.__stdout__:
+ import pdb; pdb.post_mortem(tb)
+ # we can't do much here, since we're in ctypes, swallow
+ else:
+ print str(e)
+ pypy_debug_catch_fatal_exception()
+ assert False
+
+def make_wrapper_second_level(space, callable2name, argtypesw, restype,
+ result_kind, error_value, gil):
from rpython.rlib import rgil
- names = callable.api_func.argnames
- argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes,
- [name.startswith("w_") for name in names])))
- fatal_value = callable.api_func.restype._defl()
+ argtypes_enum_ui = unrolling_iterable(enumerate(argtypesw))
+ fatal_value = restype._defl()
+ gil_auto_workaround = (gil is None) # automatically detect when we don't
+ # have the GIL, and acquire/release it
gil_acquire = (gil == "acquire" or gil == "around")
gil_release = (gil == "release" or gil == "around")
pygilstate_ensure = (gil == "pygilstate_ensure")
pygilstate_release = (gil == "pygilstate_release")
assert (gil is None or gil_acquire or gil_release
or pygilstate_ensure or pygilstate_release)
- deadlock_error = ("GIL deadlock detected when a CPython C extension "
- "module calls %r" % (callable.__name__,))
- no_gil_error = ("GIL not held when a CPython C extension "
- "module calls %r" % (callable.__name__,))
+ expected_nb_args = len(argtypesw) + pygilstate_ensure
- @specialize.ll()
- def wrapper(*args):
+ if isinstance(restype, lltype.Ptr) and error_value == 0:
+ error_value = lltype.nullptr(restype.TO)
+ if error_value is not CANNOT_FAIL:
+ assert lltype.typeOf(error_value) == lltype.typeOf(fatal_value)
+
+ def invalid(err):
+ "NOT_RPYTHON: translation-time crash if this ends up being called"
+ raise ValueError(err)
+ invalid.__name__ = 'invalid_%s' % (callable2name[0][1],)
+
+ def nameof(callable):
+ for c, n in callable2name:
+ if c is callable:
+ return n
+ return ''
+ nameof._dont_inline_ = True
+
+ def wrapper_second_level(*args):
from pypy.module.cpyext.pyobject import make_ref, from_ref, is_pyobj
from pypy.module.cpyext.pyobject import as_pyobj
# we hope that malloc removal removes the newtuple() that is
# inserted exactly here by the varargs specializer
+ callable = args[-1]
+ args = args[:-1]
# see "Handling of the GIL" above (careful, we don't have the GIL here)
tid = rthread.get_or_make_ident()
- if gil_acquire:
+ _gil_auto = (gil_auto_workaround and cpyext_glob_tid_ptr[0] != tid)
+ if gil_acquire or _gil_auto:
if cpyext_glob_tid_ptr[0] == tid:
- fatalerror_notb(deadlock_error)
+ deadlock_error(nameof(callable))
rgil.acquire()
assert cpyext_glob_tid_ptr[0] == 0
elif pygilstate_ensure:
@@ -713,7 +845,7 @@
args += (pystate.PyGILState_UNLOCKED,)
else:
if cpyext_glob_tid_ptr[0] != tid:
- fatalerror_notb(no_gil_error)
+ no_gil_error(nameof(callable))
cpyext_glob_tid_ptr[0] = 0
rffi.stackcounter.stacks_counter += 1
@@ -724,8 +856,7 @@
try:
if not we_are_translated() and DEBUG_WRAPPER:
print >>sys.stderr, callable,
- assert len(args) == (len(callable.api_func.argtypes) +
- pygilstate_ensure)
+ assert len(args) == expected_nb_args
for i, (typ, is_wrapped) in argtypes_enum_ui:
arg = args[i]
if is_PyObject(typ) and is_wrapped:
@@ -759,41 +890,31 @@
failed = False
if failed:
- error_value = callable.api_func.error_value
if error_value is CANNOT_FAIL:
- raise SystemError("The function '%s' was not supposed to fail"
- % (callable.__name__,))
+ raise not_supposed_to_fail(nameof(callable))
retval = error_value
- elif is_PyObject(callable.api_func.restype):
+ elif is_PyObject(restype):
if is_pyobj(result):
- retval = result
+ if result_kind != "L":
+ raise invalid("missing result_is_ll=True")
else:
- if result is not None:
- if callable.api_func.result_borrowed:
- retval = as_pyobj(space, result)
- else:
- retval = make_ref(space, result)
- retval = rffi.cast(callable.api_func.restype, retval)
+ if result_kind == "L":
+ raise invalid("result_is_ll=True but not ll PyObject")
+ if result_kind == "B": # borrowed
+ result = as_pyobj(space, result)
else:
- retval = lltype.nullptr(PyObject.TO)
- elif callable.api_func.restype is not lltype.Void:
- retval = rffi.cast(callable.api_func.restype, result)
+ result = make_ref(space, result)
+ retval = rffi.cast(restype, result)
+
+ elif restype is not lltype.Void:
+ retval = rffi.cast(restype, result)
+
except Exception, e:
- print 'Fatal error in cpyext, CPython compatibility layer, calling', callable.__name__
- print 'Either report a bug or consider not using this particular extension'
- if not we_are_translated():
- if tb is None:
- tb = sys.exc_info()[2]
- import traceback
- traceback.print_exc()
- if sys.stdout == sys.__stdout__:
- import pdb; pdb.post_mortem(tb)
- # we can't do much here, since we're in ctypes, swallow
- else:
- print str(e)
- pypy_debug_catch_fatal_exception()
- assert False
+ unexpected_exception(nameof(callable), e, tb)
+ return fatal_value
+
+ assert lltype.typeOf(retval) == restype
rffi.stackcounter.stacks_counter -= 1
# see "Handling of the GIL" above
@@ -803,16 +924,16 @@
arg = rffi.cast(lltype.Signed, args[-1])
unlock = (arg == pystate.PyGILState_UNLOCKED)
else:
- unlock = gil_release
+ unlock = gil_release or _gil_auto
if unlock:
rgil.release()
else:
cpyext_glob_tid_ptr[0] = tid
return retval
- callable._always_inline_ = 'try'
- wrapper.__name__ = "wrapper for %r" % (callable, )
- return wrapper
+
+ wrapper_second_level._dont_inline_ = True
+ return wrapper_second_level
def process_va_name(name):
return name.replace('*', '_star')
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -6,7 +6,7 @@
from pypy.module.cpyext.pyerrors import PyErr_BadArgument
from pypy.module.cpyext.pyobject import (
PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference,
- make_typedescr, get_typedescr, as_pyobj, Py_IncRef)
+ make_typedescr, get_typedescr, as_pyobj, Py_IncRef, get_w_obj_and_decref)
##
## Implementation of PyBytesObject
@@ -124,7 +124,7 @@
#_______________________________________________________________________
- at cpython_api([CONST_STRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_STRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyBytes_FromStringAndSize(space, char_p, length):
if char_p:
s = rffi.charpsize2str(char_p, length)
@@ -221,7 +221,7 @@
def _PyBytes_Eq(space, w_str1, w_str2):
return space.eq_w(w_str1, w_str2)
- at cpython_api([PyObjectP, PyObject], lltype.Void)
+ at cpython_api([PyObjectP, PyObject], lltype.Void, error=None)
def PyBytes_Concat(space, ref, w_newpart):
"""Create a new string object in *string containing the contents of newpart
appended to string; the caller will own the new reference. The reference to
@@ -229,25 +229,25 @@
the old reference to string will still be discarded and the value of
*string will be set to NULL; the appropriate exception will be set."""
- if not ref[0]:
+ old = ref[0]
+ if not old:
return
- if w_newpart is None or not PyBytes_Check(space, ref[0]) or \
- not PyBytes_Check(space, w_newpart):
- Py_DecRef(space, ref[0])
- ref[0] = lltype.nullptr(PyObject.TO)
- return
- w_str = from_ref(space, ref[0])
- w_newstr = space.add(w_str, w_newpart)
- ref[0] = make_ref(space, w_newstr)
- Py_IncRef(space, ref[0])
+ ref[0] = lltype.nullptr(PyObject.TO)
+ w_str = get_w_obj_and_decref(space, old)
+ if w_newpart is not None and PyBytes_Check(space, old):
+ # XXX: should use buffer protocol
+ w_newstr = space.add(w_str, w_newpart)
+ ref[0] = make_ref(space, w_newstr)
- at cpython_api([PyObjectP, PyObject], lltype.Void)
+ at cpython_api([PyObjectP, PyObject], lltype.Void, error=None)
def PyBytes_ConcatAndDel(space, ref, newpart):
"""Create a new string object in *string containing the contents of newpart
appended to string. This version decrements the reference count of newpart."""
- PyBytes_Concat(space, ref, newpart)
- Py_DecRef(space, newpart)
+ try:
+ PyBytes_Concat(space, ref, newpart)
+ finally:
+ Py_DecRef(space, newpart)
@cpython_api([PyObject, PyObject], PyObject)
def _PyBytes_Join(space, w_sep, w_seq):
diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py
--- a/pypy/module/cpyext/frameobject.py
+++ b/pypy/module/cpyext/frameobject.py
@@ -67,7 +67,8 @@
track_reference(space, py_obj, w_obj)
return w_obj
- at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject)
+ at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject,
+ result_is_ll=True)
def PyFrame_New(space, tstate, w_code, w_globals, w_locals):
typedescr = get_typedescr(PyFrame.typedef)
py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef))
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -239,9 +239,7 @@
gufunctype = lltype.Ptr(ufuncs.GenericUfunc)
-# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there
-# a problem with casting function pointers?
- at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
+ at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t,
rffi.CCHARP], PyObject, header=HEADER)
def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes,
@@ -256,7 +254,7 @@
funcs_w = [None] * ntypes
dtypes_w = [None] * ntypes * (nin + nout)
for i in range(ntypes):
- funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data)
+ funcs_w[i] = ufuncs.W_GenericUFuncCaller(funcs[i], data)
for i in range(ntypes*(nin+nout)):
dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])]
w_funcs = space.newlist(funcs_w)
@@ -268,7 +266,7 @@
w_signature, w_identity, w_name, w_doc, stack_inputs=True)
return ufunc_generic
- at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
+ at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], PyObject, header=HEADER)
def PyUFunc_FromFuncAndData(space, funcs, data, types, ntypes,
nin, nout, identity, name, doc, check_return):
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -38,11 +38,11 @@
def PyObject_Free(space, ptr):
lltype.free(ptr, flavor='raw')
- at cpython_api([PyTypeObjectPtr], PyObject)
+ at cpython_api([PyTypeObjectPtr], PyObject, result_is_ll=True)
def _PyObject_New(space, type):
return _PyObject_NewVar(space, type, 0)
- at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def _PyObject_NewVar(space, type, itemcount):
w_type = from_ref(space, rffi.cast(PyObject, type))
assert isinstance(w_type, W_TypeObject)
@@ -67,7 +67,7 @@
if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE:
Py_DecRef(space, rffi.cast(PyObject, pto))
- at cpython_api([PyTypeObjectPtr], PyObject)
+ at cpython_api([PyTypeObjectPtr], PyObject, result_is_ll=True)
def _PyObject_GC_New(space, type):
return _PyObject_New(space, type)
@@ -201,7 +201,7 @@
space.delitem(w_obj, w_key)
return 0
- at cpython_api([PyObject, PyTypeObjectPtr], PyObject)
+ at cpython_api([PyObject, PyTypeObjectPtr], PyObject, result_is_ll=True)
def PyObject_Init(space, obj, type):
"""Initialize a newly-allocated object op with its type and initial
reference. Returns the initialized object. If type indicates that the
@@ -215,7 +215,7 @@
obj.c_ob_refcnt = 1
return obj
- at cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def PyObject_InitVar(space, py_obj, type, size):
"""This does everything PyObject_Init() does, and also initializes the
length information for a variable-size object."""
@@ -305,7 +305,7 @@
w_res = PyObject_RichCompare(space, ref1, ref2, opid)
return int(space.is_true(w_res))
- at cpython_api([PyObject], PyObject)
+ at cpython_api([PyObject], PyObject, result_is_ll=True)
def PyObject_SelfIter(space, ref):
"""Undocumented function, this is what CPython does."""
Py_IncRef(space, ref)
diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py
--- a/pypy/module/cpyext/pystate.py
+++ b/pypy/module/cpyext/pystate.py
@@ -172,8 +172,16 @@
py_fatalerror("PyThreadState_Get: no current thread")
return ts
- at cpython_api([], PyObject, error=CANNOT_FAIL)
+ at cpython_api([], PyObject, result_is_ll=True, error=CANNOT_FAIL)
def PyThreadState_GetDict(space):
+ """Return a dictionary in which extensions can store thread-specific state
+ information. Each extension should use a unique key to use to store state in
+ the dictionary. It is okay to call this function when no current thread state
+ is available. If this function returns NULL, no exception has been raised and
+ the caller should assume no current thread state is available.
+
+ Previously this could only be called when a current thread is active, and NULL
+ meant that an exception was raised."""
state = space.fromcache(InterpreterState)
return state.get_thread_state(space).c_dict
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -3,7 +3,7 @@
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.bytesobject import new_empty_str, PyBytesObject
-from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP
+from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call
from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref
from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr
@@ -145,6 +145,7 @@
"""
PyObject ** v;
PyObject * left = PyTuple_GetItem(args, 0);
+ Py_INCREF(left); /* the reference will be stolen! */
v = &left;
PyBytes_Concat(v, PyTuple_GetItem(args, 1));
return *v;
@@ -221,6 +222,7 @@
assert space.bytes_w(from_ref(space, ptr[0])) == 'abcdef'
api.PyBytes_Concat(ptr, space.w_None)
assert not ptr[0]
+ api.PyErr_Clear()
ptr[0] = lltype.nullptr(PyObject.TO)
api.PyBytes_Concat(ptr, space.wrapbytes('def')) # should not crash
lltype.free(ptr, flavor='raw')
diff --git a/pypy/module/cpyext/test/test_datetime.py b/pypy/module/cpyext/test/test_datetime.py
--- a/pypy/module/cpyext/test/test_datetime.py
+++ b/pypy/module/cpyext/test/test_datetime.py
@@ -109,7 +109,7 @@
Py_RETURN_NONE;
"""
)
- ])
+ ], prologue='#include "datetime.h"\n')
import datetime
assert module.get_types() == (datetime.date,
datetime.datetime,
diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py
--- a/pypy/module/cpyext/test/test_dictobject.py
+++ b/pypy/module/cpyext/test/test_dictobject.py
@@ -185,6 +185,7 @@
if (!PyArg_ParseTuple(args, "O", &dict))
return NULL;
proxydict = PyDictProxy_New(dict);
+#ifdef PYPY_VERSION // PyDictProxy_Check[Exact] are PyPy-specific.
if (!PyDictProxy_Check(proxydict)) {
Py_DECREF(proxydict);
PyErr_SetNone(PyExc_ValueError);
@@ -195,6 +196,7 @@
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
+#endif // PYPY_VERSION
i = PyObject_Size(proxydict);
Py_DECREF(proxydict);
return PyLong_FromLong(i);
diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py
--- a/pypy/module/cpyext/test/test_ndarrayobject.py
+++ b/pypy/module/cpyext/test/test_ndarrayobject.py
@@ -368,7 +368,7 @@
def test_ufunc(self):
if self.runappdirect:
from numpy import arange
- py.test.xfail('why does this segfault on cpython?')
+ py.test.xfail('segfaults on cpython: PyUFunc_API == NULL?')
else:
from _numpypy.multiarray import arange
mod = self.import_extension('foo', [
diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
--- a/pypy/module/cpyext/test/test_pyerrors.py
+++ b/pypy/module/cpyext/test/test_pyerrors.py
@@ -355,6 +355,8 @@
assert "in test_PyErr_Display\n" in output
assert "ZeroDivisionError" in output
+ @pytest.mark.skipif(True, reason=
+ "XXX seems to pass, but doesn't: 'py.test -s' shows errors in PyObject_Free")
def test_GetSetExcInfo(self):
import sys
if self.runappdirect and (sys.version_info.major < 3 or
diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py
--- a/pypy/module/cpyext/test/test_thread.py
+++ b/pypy/module/cpyext/test/test_thread.py
@@ -1,9 +1,12 @@
-import py
+import sys
+
+import py, pytest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
class AppTestThread(AppTestCpythonExtensionBase):
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_get_thread_ident(self):
module = self.import_extension('foo', [
("get_thread_ident", "METH_NOARGS",
@@ -30,6 +33,7 @@
assert results[0][0] != results[1][0]
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_acquire_lock(self):
module = self.import_extension('foo', [
("test_acquire_lock", "METH_NOARGS",
@@ -53,13 +57,14 @@
])
module.test_acquire_lock()
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_release_lock(self):
module = self.import_extension('foo', [
("test_release_lock", "METH_NOARGS",
"""
#ifndef PyThread_release_lock
#error "seems we are not accessing PyPy's functions"
-#endif
+#endif
PyThread_type_lock lock = PyThread_allocate_lock();
PyThread_acquire_lock(lock, 1);
PyThread_release_lock(lock);
@@ -74,6 +79,7 @@
])
module.test_release_lock()
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_tls(self):
module = self.import_extension('foo', [
("create_key", "METH_NOARGS",
diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py
--- a/pypy/module/cpyext/test/test_tupleobject.py
+++ b/pypy/module/cpyext/test/test_tupleobject.py
@@ -84,7 +84,14 @@
"""
PyObject *item = PyTuple_New(0);
PyObject *t = PyTuple_New(1);
- if (t->ob_refcnt != 1 || item->ob_refcnt != 1) {
+#ifdef PYPY_VERSION
+ // PyPy starts even empty tuples with a refcount of 1.
+ const int initial_item_refcount = 1;
+#else
+ // CPython can cache ().
+ const int initial_item_refcount = item->ob_refcnt;
+#endif // PYPY_VERSION
+ if (t->ob_refcnt != 1 || item->ob_refcnt != initial_item_refcount) {
PyErr_SetString(PyExc_SystemError, "bad initial refcnt");
return NULL;
}
@@ -94,8 +101,8 @@
PyErr_SetString(PyExc_SystemError, "SetItem: t refcnt != 1");
return NULL;
}
- if (item->ob_refcnt != 1) {
- PyErr_SetString(PyExc_SystemError, "SetItem: item refcnt != 1");
+ if (item->ob_refcnt != initial_item_refcount) {
+ PyErr_SetString(PyExc_SystemError, "SetItem: item refcnt != initial_item_refcount");
return NULL;
}
@@ -109,8 +116,8 @@
PyErr_SetString(PyExc_SystemError, "GetItem: t refcnt != 1");
return NULL;
}
- if (item->ob_refcnt != 1) {
- PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != 1");
+ if (item->ob_refcnt != initial_item_refcount) {
+ PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != initial_item_refcount");
return NULL;
}
return t;
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -24,8 +24,11 @@
if(PyUnicode_GetSize(s) != 11) {
result = -PyUnicode_GetSize(s);
}
+#ifdef PYPY_VERSION
+ // Slightly silly test that tp_basicsize is reasonable.
if(s->ob_type->tp_basicsize != sizeof(void*)*6)
result = s->ob_type->tp_basicsize;
+#endif // PYPY_VERSION
Py_DECREF(s);
return PyLong_FromLong(result);
"""),
diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py
--- a/pypy/module/cpyext/test/test_version.py
+++ b/pypy/module/cpyext/test/test_version.py
@@ -29,8 +29,6 @@
PyModule_AddIntConstant(m, "py_major_version", PY_MAJOR_VERSION);
PyModule_AddIntConstant(m, "py_minor_version", PY_MINOR_VERSION);
PyModule_AddIntConstant(m, "py_micro_version", PY_MICRO_VERSION);
- PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION);
- PyModule_AddIntConstant(m, "pypy_version_num", PYPY_VERSION_NUM);
return m;
}
"""
@@ -39,6 +37,18 @@
assert module.py_major_version == sys.version_info.major
assert module.py_minor_version == sys.version_info.minor
assert module.py_micro_version == sys.version_info.micro
+
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
+ def test_pypy_versions(self):
+ import sys
+ init = """
+ if (Py_IsInitialized()) {
+ PyObject *m = Py_InitModule("foo", NULL);
+ PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION);
+ PyModule_AddIntConstant(m, "pypy_version_num", PYPY_VERSION_NUM);
+ }
+ """
+ module = self.import_module(name='foo', init=init)
v = sys.pypy_version_info
s = '%d.%d.%d' % (v[0], v[1], v[2])
if v.releaselevel != 'final':
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -127,7 +127,7 @@
#_______________________________________________________________________
- at cpython_api([Py_ssize_t], PyObject)
+ at cpython_api([Py_ssize_t], PyObject, result_is_ll=True)
def PyTuple_New(space, size):
return rffi.cast(PyObject, new_empty_tuple(space, size))
@@ -150,7 +150,8 @@
decref(space, old_ref)
return 0
- at cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True)
+ at cpython_api([PyObject, Py_ssize_t], PyObject,
+ result_borrowed=True, result_is_ll=True)
def PyTuple_GetItem(space, ref, index):
if not tuple_check_ref(space, ref):
PyErr_BadInternalCall(space)
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -701,7 +701,7 @@
w_type2 = from_ref(space, rffi.cast(PyObject, b))
return int(abstract_issubclass_w(space, w_type1, w_type2)) #XXX correct?
- at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def PyType_GenericAlloc(space, type, nitems):
from pypy.module.cpyext.object import _PyObject_NewVar
return _PyObject_NewVar(space, type, nitems)
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -337,7 +337,7 @@
return unicodeobject.encode_object(space, w_unicode, 'unicode-escape', 'strict')
- at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyUnicode_FromUnicode(space, wchar_p, length):
"""Create a Unicode Object from the Py_UNICODE buffer u of the given size. u
may be NULL which causes the contents to be undefined. It is the user's
@@ -351,14 +351,14 @@
else:
return rffi.cast(PyObject, new_empty_unicode(space, length))
- at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyUnicode_FromWideChar(space, wchar_p, length):
"""Create a Unicode object from the wchar_t buffer w of the given size.
Return NULL on failure."""
# PyPy supposes Py_UNICODE == wchar_t
return PyUnicode_FromUnicode(space, wchar_p, length)
- at cpython_api([PyObject, CONST_STRING], PyObject)
+ at cpython_api([PyObject, CONST_STRING], PyObject, result_is_ll=True)
def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors):
return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors)
@@ -532,7 +532,7 @@
w_str = PyUnicode_FromString(space, s)
return space.new_interned_w_str(w_str)
- at cpython_api([CONST_STRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_STRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyUnicode_FromStringAndSize(space, s, size):
"""Create a Unicode Object from the char buffer u. The bytes will be
interpreted as being UTF-8 encoded. u may also be NULL which causes the
diff --git a/pypy/module/unicodedata/interp_ucd.py b/pypy/module/unicodedata/interp_ucd.py
--- a/pypy/module/unicodedata/interp_ucd.py
+++ b/pypy/module/unicodedata/interp_ucd.py
@@ -4,7 +4,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
from rpython.rlib.rarithmetic import r_longlong
from rpython.rlib.objectmodel import we_are_translated
@@ -34,8 +34,9 @@
# Target is wide build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode == 0xFFFF:
# Host CPython is narrow build, accept surrogates
@@ -54,8 +55,9 @@
# Target is narrow build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode > 0xFFFF:
# Host CPython is wide build, forbid surrogates
@@ -187,7 +189,9 @@
@unwrap_spec(form=str)
def normalize(self, space, form, w_unistr):
if not space.isinstance_w(w_unistr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap('argument 2 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 2 must be unicode, not %T',
+ w_unistr)
if form == 'NFC':
composed = True
decomposition = self._canon_decomposition
diff --git a/pypy/module/unicodedata/test/test_unicodedata.py b/pypy/module/unicodedata/test/test_unicodedata.py
--- a/pypy/module/unicodedata/test/test_unicodedata.py
+++ b/pypy/module/unicodedata/test/test_unicodedata.py
@@ -78,10 +78,15 @@
import unicodedata
assert unicodedata.lookup("GOTHIC LETTER FAIHU") == '\U00010346'
- def test_normalize(self):
+ def test_normalize_bad_argcount(self):
import unicodedata
raises(TypeError, unicodedata.normalize, 'x')
+ def test_normalize_nonunicode(self):
+ import unicodedata
+ exc_info = raises(TypeError, unicodedata.normalize, 'NFC', b'x')
+ assert 'must be unicode, not' in str(exc_info.value)
+
@py.test.mark.skipif("sys.maxunicode < 0x10ffff")
def test_normalize_wide(self):
import unicodedata
@@ -103,9 +108,10 @@
# For no reason, unicodedata.mirrored() returns an int, not a bool
assert repr(unicodedata.mirrored(' ')) == '0'
- def test_bidirectional(self):
+ def test_bidirectional_not_one_character(self):
import unicodedata
- raises(TypeError, unicodedata.bidirectional, 'xx')
+ exc_info = raises(TypeError, unicodedata.bidirectional, u'xx')
+ assert str(exc_info.value) == 'need a single Unicode character as parameter'
def test_aliases(self):
import unicodedata
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -277,7 +277,7 @@
def copy(self, obj):
result = Object()
result.space = self.space
- result._init_empty(self)
+ result._mapdict_init_empty(self)
return result
def length(self):
@@ -286,7 +286,7 @@
def set_terminator(self, obj, terminator):
result = Object()
result.space = self.space
- result._init_empty(terminator)
+ result._mapdict_init_empty(terminator)
return result
def remove_dict_entries(self, obj):
@@ -304,7 +304,7 @@
def materialize_r_dict(self, space, obj, dict_w):
result = Object()
result.space = space
- result._init_empty(self.devolved_dict_terminator)
+ result._mapdict_init_empty(self.devolved_dict_terminator)
return result
@@ -417,11 +417,6 @@
def __repr__(self):
return "<PlainAttribute %s %s %s %s>" % (self.name, self.index, self.storageindex, self.back)
-def _become(w_obj, new_obj):
- # this is like the _become method, really, but we cannot use that due to
- # RPython reasons
- w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
-
class MapAttrCache(object):
def __init__(self, space):
SIZE = 1 << space.config.objspace.std.methodcachesizeexp
@@ -457,22 +452,12 @@
# everything that's needed to use mapdict for a user subclass at all.
# This immediately makes slots possible.
- # assumes presence of _init_empty, _mapdict_read_storage,
+ # assumes presence of _get_mapdict_map, _set_mapdict_map
+ # _mapdict_init_empty, _mapdict_read_storage,
# _mapdict_write_storage, _mapdict_storage_length,
# _set_mapdict_storage_and_map
# _____________________________________________
- # methods needed for mapdict
-
- def _become(self, new_obj):
- self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
-
- def _get_mapdict_map(self):
- return jit.promote(self.map)
- def _set_mapdict_map(self, map):
- self.map = map
-
- # _____________________________________________
# objspace interface
# class access
@@ -482,13 +467,13 @@
def setclass(self, space, w_cls):
new_obj = self._get_mapdict_map().set_terminator(self, w_cls.terminator)
- self._become(new_obj)
+ self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
def user_setup(self, space, w_subtype):
self.space = space
assert (not self.typedef.hasdict or
isinstance(w_subtype.terminator, NoDictTerminator))
- self._init_empty(w_subtype.terminator)
+ self._mapdict_init_empty(w_subtype.terminator)
# methods needed for slots
@@ -506,7 +491,7 @@
new_obj = self._get_mapdict_map().delete(self, "slot", index)
if new_obj is None:
return False
- self._become(new_obj)
+ self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
return True
@@ -547,7 +532,7 @@
new_obj = self._get_mapdict_map().delete(self, attrname, DICT)
if new_obj is None:
return False
- self._become(new_obj)
+ self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
return True
def getdict(self, space):
@@ -597,7 +582,12 @@
assert flag
class MapdictStorageMixin(object):
- def _init_empty(self, map):
+ def _get_mapdict_map(self):
+ return jit.promote(self.map)
+ def _set_mapdict_map(self, map):
+ self.map = map
+
+ def _mapdict_init_empty(self, map):
from rpython.rlib.debug import make_sure_not_resized
self.map = map
self.storage = make_sure_not_resized([None] * map.size_estimate())
@@ -611,6 +601,7 @@
def _mapdict_storage_length(self):
return len(self.storage)
+
def _set_mapdict_storage_and_map(self, storage, map):
self.storage = storage
self.map = map
@@ -641,7 +632,11 @@
rangenmin1 = unroll.unrolling_iterable(range(nmin1))
valnmin1 = "_value%s" % nmin1
class subcls(object):
- def _init_empty(self, map):
+ def _get_mapdict_map(self):
+ return jit.promote(self.map)
+ def _set_mapdict_map(self, map):
+ self.map = map
+ def _mapdict_init_empty(self, map):
for i in rangenmin1:
setattr(self, "_value%s" % i, None)
setattr(self, valnmin1, erase_item(None))
@@ -729,7 +724,7 @@
def get_empty_storage(self):
w_result = Object()
terminator = self.space.fromcache(get_terminator_for_dicts)
- w_result._init_empty(terminator)
+ w_result._mapdict_init_empty(terminator)
return self.erase(w_result)
def switch_to_object_strategy(self, w_dict):
@@ -809,7 +804,7 @@
def clear(self, w_dict):
w_obj = self.unerase(w_dict.dstorage)
new_obj = w_obj._get_mapdict_map().remove_dict_entries(w_obj)
- _become(w_obj, new_obj)
+ w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
def popitem(self, w_dict):
curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT)
@@ -834,7 +829,7 @@
def materialize_r_dict(space, obj, dict_w):
map = obj._get_mapdict_map()
new_obj = map.materialize_r_dict(space, obj, dict_w)
- _become(obj, new_obj)
+ obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
class MapDictIteratorKeys(BaseKeyIterator):
def __init__(self, space, strategy, w_dict):
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -374,7 +374,7 @@
cls = cls.typedef.applevel_subclasses_base
#
subcls = get_unique_interplevel_subclass(
- self.config, cls, w_subtype.needsdel)
+ self, cls, w_subtype.needsdel)
instance = instantiate(subcls)
assert isinstance(instance, cls)
instance.user_setup(self, w_subtype)
diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
--- a/pypy/tool/release/repackage.sh
+++ b/pypy/tool/release/repackage.sh
@@ -1,7 +1,7 @@
# Edit these appropriately before running this script
maj=5
min=1
-rev=0
+rev=1
branchname=release-$maj.x # ==OR== release-$maj.$min.x
tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev
diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py
--- a/rpython/rtyper/lltypesystem/ll2ctypes.py
+++ b/rpython/rtyper/lltypesystem/ll2ctypes.py
@@ -231,17 +231,7 @@
assert max_n >= 0
ITEM = A.OF
ctypes_item = get_ctypes_type(ITEM, delayed_builders)
- # Python 2.5 ctypes can raise OverflowError on 64-bit builds
- for n in [maxint, 2**31]:
- MAX_SIZE = n/64
- try:
- PtrType = ctypes.POINTER(MAX_SIZE * ctypes_item)
- except (OverflowError, AttributeError), e:
- pass # ^^^ bah, blame ctypes
- else:
- break
- else:
- raise e
+ ctypes_item_ptr = ctypes.POINTER(ctypes_item)
class CArray(ctypes.Structure):
if is_emulated_long:
@@ -265,35 +255,9 @@
bigarray.length = n
return bigarray
- _ptrtype = None
-
- @classmethod
- def _get_ptrtype(cls):
- if cls._ptrtype:
- return cls._ptrtype
- # ctypes can raise OverflowError on 64-bit builds
- # on windows it raises AttributeError even for 2**31 (_length_ missing)
- if _MS_WINDOWS:
- other_limit = 2**31-1
- else:
- other_limit = 2**31
- for n in [maxint, other_limit]:
- cls.MAX_SIZE = n / ctypes.sizeof(ctypes_item)
- try:
- cls._ptrtype = ctypes.POINTER(cls.MAX_SIZE * ctypes_item)
- except (OverflowError, AttributeError), e:
- pass
- else:
- break
- else:
- raise e
- return cls._ptrtype
-
def _indexable(self, index):
- PtrType = self._get_ptrtype()
- assert index + 1 < self.MAX_SIZE
- p = ctypes.cast(ctypes.pointer(self.items), PtrType)
- return p.contents
+ p = ctypes.cast(self.items, ctypes_item_ptr)
+ return p
def _getitem(self, index, boundscheck=True):
if boundscheck:
@@ -1045,12 +1009,22 @@
container = _array_of_known_length(T.TO)
container._storage = type(cobj)(cobj.contents)
elif isinstance(T.TO, lltype.FuncType):
+ # cobj is a CFunctionType object. We naively think
+ # that it should be a function pointer. No no no. If
+ # it was read out of an array, say, then it is a *pointer*
+ # to a function pointer. In other words, the read doesn't
+ # read anything, it just takes the address of the function
+ # pointer inside the array. If later the array is modified
+ # or goes out of scope, then we crash. CTypes is fun.
+ # It works if we cast it now to an int and back.
cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value)
if cobjkey in _int2obj:
container = _int2obj[cobjkey]
else:
+ name = getattr(cobj, '__name__', '?')
+ cobj = ctypes.cast(cobjkey, type(cobj))
_callable = get_ctypes_trampoline(T.TO, cobj)
- return lltype.functionptr(T.TO, getattr(cobj, '__name__', '?'),
+ return lltype.functionptr(T.TO, name,
_callable=_callable)
elif isinstance(T.TO, lltype.OpaqueType):
if T == llmemory.GCREF:
diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py
--- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py
+++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py
@@ -1405,6 +1405,45 @@
a2 = ctypes2lltype(lltype.Ptr(A), lltype2ctypes(a))
assert a2._obj.getitem(0)._obj._parentstructure() is a2._obj
+ def test_array_of_function_pointers(self):
+ c_source = py.code.Source(r"""
+ #include "src/precommondefs.h"
+ #include <stdio.h>
+
+ typedef int(*funcptr_t)(void);
+ static int forty_two(void) { return 42; }
+ static int forty_three(void) { return 43; }
+ static funcptr_t testarray[2];
+ RPY_EXPORTED void runtest(void cb(funcptr_t *)) {
+ testarray[0] = &forty_two;
+ testarray[1] = &forty_three;
+ fprintf(stderr, "&forty_two = %p\n", testarray[0]);
+ fprintf(stderr, "&forty_three = %p\n", testarray[1]);
+ cb(testarray);
+ testarray[0] = 0;
+ testarray[1] = 0;
+ }
+ """)
+ eci = ExternalCompilationInfo(include_dirs=[cdir],
+ separate_module_sources=[c_source])
+
+ PtrF = lltype.Ptr(lltype.FuncType([], rffi.INT))
+ ArrayPtrF = rffi.CArrayPtr(PtrF)
+ CALLBACK = rffi.CCallback([ArrayPtrF], lltype.Void)
+
+ runtest = rffi.llexternal('runtest', [CALLBACK], lltype.Void,
+ compilation_info=eci)
+ seen = []
+
+ def callback(testarray):
+ seen.append(testarray[0]) # read a PtrF out of testarray
+ seen.append(testarray[1])
+
+ runtest(callback)
+ assert seen[0]() == 42
+ assert seen[1]() == 43
+
+
class TestPlatform(object):
def test_lib_on_libpaths(self):
from rpython.translator.platform import platform
diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py
--- a/rpython/rtyper/rpbc.py
+++ b/rpython/rtyper/rpbc.py
@@ -544,6 +544,21 @@
ll_compress = compression_function(r_set)
return llops.gendirectcall(ll_compress, v)
+class __extend__(pairtype(FunctionReprBase, FunctionReprBase)):
+ def rtype_is_((robj1, robj2), hop):
+ if hop.s_result.is_constant():
+ return inputconst(Bool, hop.s_result.const)
+ s_pbc = annmodel.unionof(robj1.s_pbc, robj2.s_pbc)
+ r_pbc = hop.rtyper.getrepr(s_pbc)
+ v1, v2 = hop.inputargs(r_pbc, r_pbc)
+ assert v1.concretetype == v2.concretetype
+ if v1.concretetype == Char:
+ return hop.genop('char_eq', [v1, v2], resulttype=Bool)
+ elif isinstance(v1.concretetype, Ptr):
+ return hop.genop('ptr_eq', [v1, v2], resulttype=Bool)
+ else:
+ raise TyperError("unknown type %r" % (v1.concretetype,))
+
def conversion_table(r_from, r_to):
if r_to in r_from._conversion_tables:
diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py
--- a/rpython/rtyper/test/test_rpbc.py
+++ b/rpython/rtyper/test/test_rpbc.py
@@ -1497,6 +1497,47 @@
res = self.interpret(f, [2])
assert res == False
+ def test_is_among_functions_2(self):
+ def g1(): pass
+ def g2(): pass
+ def f(n):
+ if n > 5:
+ g = g2
+ else:
+ g = g1
+ g()
+ return g is g2
+ res = self.interpret(f, [2])
+ assert res == False
+ res = self.interpret(f, [8])
+ assert res == True
+
+ def test_is_among_functions_3(self):
+ def g0(): pass
+ def g1(): pass
+ def g2(): pass
+ def g3(): pass
+ def g4(): pass
+ def g5(): pass
+ def g6(): pass
+ def g7(): pass
+ glist = [g0, g1, g2, g3, g4, g5, g6, g7]
+ def f(n):
+ if n > 5:
+ g = g2
+ else:
+ g = g1
+ h = glist[n]
+ g()
+ h()
+ return g is h
+ res = self.interpret(f, [2])
+ assert res == False
+ res = self.interpret(f, [1])
+ assert res == True
+ res = self.interpret(f, [6])
+ assert res == False
+
def test_shrink_pbc_set(self):
def g1():
return 10
From pypy.commits at gmail.com Sun May 1 16:37:00 2016
From: pypy.commits at gmail.com (rlamy)
Date: Sun, 01 May 2016 13:37:00 -0700 (PDT)
Subject: [pypy-commit] pypy py3k-update: Remove reference to old-style
classes
Message-ID: <572668ec.cf8ec20a.1afa0.ffffbe38@mx.google.com>
Author: Ronan Lamy
Branch: py3k-update
Changeset: r84108:3e13ccbf37de
Date: 2016-05-01 21:36 +0100
http://bitbucket.org/pypy/pypy/changeset/3e13ccbf37de/
Log: Remove reference to old-style classes
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -125,7 +125,6 @@
def _getusercls(space, cls, wants_del, reallywantdict=False):
from rpython.rlib import objectmodel
from pypy.objspace.std.objectobject import W_ObjectObject
- from pypy.module.__builtin__.interp_classobj import W_InstanceObject
from pypy.objspace.std.mapdict import (BaseUserClassMapdict,
MapdictDictSupport, MapdictWeakrefSupport,
_make_storage_mixin_size_n, MapdictStorageMixin)
@@ -133,7 +132,7 @@
name = cls.__name__ + "User"
mixins_needed = []
- if cls is W_ObjectObject or cls is W_InstanceObject:
+ if cls is W_ObjectObject:
mixins_needed.append(_make_storage_mixin_size_n())
else:
mixins_needed.append(MapdictStorageMixin)
From pypy.commits at gmail.com Sun May 1 17:26:22 2016
From: pypy.commits at gmail.com (rlamy)
Date: Sun, 01 May 2016 14:26:22 -0700 (PDT)
Subject: [pypy-commit] pypy py3k-update: Fix translation
Message-ID: <5726747e.82bb1c0a.4e99d.ffffe6e1@mx.google.com>
Author: Ronan Lamy
Branch: py3k-update
Changeset: r84109:4e240e7e8307
Date: 2016-05-01 22:25 +0100
http://bitbucket.org/pypy/pypy/changeset/4e240e7e8307/
Log: Fix translation
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -71,7 +71,7 @@
def _PyObject_GC_New(space, type):
return _PyObject_New(space, type)
- at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def _PyObject_GC_NewVar(space, type, itemcount):
return _PyObject_NewVar(space, type, itemcount)
@@ -446,7 +446,7 @@
bufferp[0] = rffi.cast(rffi.CCHARP, view.c_buf)
sizep[0] = view.c_len
-
+
if pb.c_bf_releasebuffer:
generic_cpy_call(space, pb.c_bf_releasebuffer,
obj, view)
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -358,7 +358,7 @@
# PyPy supposes Py_UNICODE == wchar_t
return PyUnicode_FromUnicode(space, wchar_p, length)
- at cpython_api([PyObject, CONST_STRING], PyObject, result_is_ll=True)
+ at cpython_api([PyObject, CONST_STRING], PyObject)
def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors):
return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors)
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -470,7 +470,6 @@
self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
def user_setup(self, space, w_subtype):
- self.space = space
assert (not self.typedef.hasdict or
isinstance(w_subtype.terminator, NoDictTerminator))
self._mapdict_init_empty(w_subtype.terminator)
From pypy.commits at gmail.com Sun May 1 18:48:32 2016
From: pypy.commits at gmail.com (rlamy)
Date: Sun, 01 May 2016 15:48:32 -0700 (PDT)
Subject: [pypy-commit] pypy py3k-update: close branch before merging
Message-ID: <572687c0.2976c20a.45b3b.fffff154@mx.google.com>
Author: Ronan Lamy
Branch: py3k-update
Changeset: r84110:0b90cfe66049
Date: 2016-05-01 23:45 +0100
http://bitbucket.org/pypy/pypy/changeset/0b90cfe66049/
Log: close branch before merging
From pypy.commits at gmail.com Sun May 1 18:48:36 2016
From: pypy.commits at gmail.com (rlamy)
Date: Sun, 01 May 2016 15:48:36 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Merge branch 'py3k-update'
Message-ID: <572687c4.47afc20a.9c2cb.fffff030@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r84111:5947b91767eb
Date: 2016-05-01 23:47 +0100
http://bitbucket.org/pypy/pypy/changeset/5947b91767eb/
Log: Merge branch 'py3k-update'
diff too long, truncating to 2000 out of 19510 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,3 +20,5 @@
5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/TODO b/TODO
new file mode 100644
--- /dev/null
+++ b/TODO
@@ -0,0 +1,2 @@
+* reduce size of generated c code from slot definitions in slotdefs.
+* remove broken DEBUG_REFCOUNT from pyobject.py
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
+ 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after ` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create a integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -320,8 +320,7 @@
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
- def __hash__(self):
- raise TypeError("deque objects are unhashable")
+ __hash__ = None
def __copy__(self):
return self.__class__(self, self.maxlen)
diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py
--- a/lib_pypy/_pypy_wait.py
+++ b/lib_pypy/_pypy_wait.py
@@ -1,51 +1,22 @@
-from resource import _struct_rusage, struct_rusage
-from ctypes import CDLL, c_int, POINTER, byref
-from ctypes.util import find_library
+from resource import ffi, lib, _make_struct_rusage
__all__ = ["wait3", "wait4"]
-libc = CDLL(find_library("c"))
-c_wait3 = libc.wait3
-c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait3.restype = c_int
-
-c_wait4 = libc.wait4
-c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait4.restype = c_int
-
-def create_struct_rusage(c_struct):
- return struct_rusage((
- float(c_struct.ru_utime),
- float(c_struct.ru_stime),
- c_struct.ru_maxrss,
- c_struct.ru_ixrss,
- c_struct.ru_idrss,
- c_struct.ru_isrss,
- c_struct.ru_minflt,
- c_struct.ru_majflt,
- c_struct.ru_nswap,
- c_struct.ru_inblock,
- c_struct.ru_oublock,
- c_struct.ru_msgsnd,
- c_struct.ru_msgrcv,
- c_struct.ru_nsignals,
- c_struct.ru_nvcsw,
- c_struct.ru_nivcsw))
def wait3(options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait3(byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait3(status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
def wait4(pid, options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait4(pid, status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_resource_build.py
@@ -0,0 +1,118 @@
+from cffi import FFI
+
+ffi = FFI()
+
+# Note: we don't directly expose 'struct timeval' or 'struct rlimit'
+
+
+rlimit_consts = '''
+RLIMIT_CPU
+RLIMIT_FSIZE
+RLIMIT_DATA
+RLIMIT_STACK
+RLIMIT_CORE
+RLIMIT_NOFILE
+RLIMIT_OFILE
+RLIMIT_VMEM
+RLIMIT_AS
+RLIMIT_RSS
+RLIMIT_NPROC
+RLIMIT_MEMLOCK
+RLIMIT_SBSIZE
+RLIM_INFINITY
+RUSAGE_SELF
+RUSAGE_CHILDREN
+RUSAGE_BOTH
+'''.split()
+
+rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s)
+ for s in rlimit_consts]
+
+
+ffi.set_source("_resource_cffi", """
+#include
+#include
+#include
+#include
+
+static const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[] = {
+$RLIMIT_CONSTS
+ { NULL, 0 }
+};
+
+#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001)
+
+static double my_utime(struct rusage *input)
+{
+ return doubletime(input->ru_utime);
+}
+
+static double my_stime(struct rusage *input)
+{
+ return doubletime(input->ru_stime);
+}
+
+static int my_getrlimit(int resource, long long result[2])
+{
+ struct rlimit rl;
+ if (getrlimit(resource, &rl) == -1)
+ return -1;
+ result[0] = rl.rlim_cur;
+ result[1] = rl.rlim_max;
+ return 0;
+}
+
+static int my_setrlimit(int resource, long long cur, long long max)
+{
+ struct rlimit rl;
+ rl.rlim_cur = cur & RLIM_INFINITY;
+ rl.rlim_max = max & RLIM_INFINITY;
+ return setrlimit(resource, &rl);
+}
+
+""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts)))
+
+
+ffi.cdef("""
+
+#define RLIM_NLIMITS ...
+
+const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[];
+
+struct rusage {
+ long ru_maxrss;
+ long ru_ixrss;
+ long ru_idrss;
+ long ru_isrss;
+ long ru_minflt;
+ long ru_majflt;
+ long ru_nswap;
+ long ru_inblock;
+ long ru_oublock;
+ long ru_msgsnd;
+ long ru_msgrcv;
+ long ru_nsignals;
+ long ru_nvcsw;
+ long ru_nivcsw;
+ ...;
+};
+
+static double my_utime(struct rusage *);
+static double my_stime(struct rusage *);
+void getrusage(int who, struct rusage *result);
+int my_getrlimit(int resource, long long result[2]);
+int my_setrlimit(int resource, long long cur, long long max);
+
+int wait3(int *status, int options, struct rusage *rusage);
+int wait4(int pid, int *status, int options, struct rusage *rusage);
+""")
+
+
+if __name__ == "__main__":
+ ffi.compile()
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -29,7 +29,8 @@
_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
_r_cdecl = re.compile(r"\b__cdecl\b")
-_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')
+_r_extern_python = re.compile(r'\bextern\s*"'
+ r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
_r_star_const_space = re.compile( # matches "* const "
r"[*]\s*((const|volatile|restrict)\b\s*)+")
@@ -88,6 +89,12 @@
# void __cffi_extern_python_start;
# int foo(int);
# void __cffi_extern_python_stop;
+ #
+ # input: `extern "Python+C" int foo(int);`
+ # output:
+ # void __cffi_extern_python_plus_c_start;
+ # int foo(int);
+ # void __cffi_extern_python_stop;
parts = []
while True:
match = _r_extern_python.search(csource)
@@ -98,7 +105,10 @@
#print ''.join(parts)+csource
#print '=>'
parts.append(csource[:match.start()])
- parts.append('void __cffi_extern_python_start; ')
+ if 'C' in match.group(1):
+ parts.append('void __cffi_extern_python_plus_c_start; ')
+ else:
+ parts.append('void __cffi_extern_python_start; ')
if csource[endpos] == '{':
# grouping variant
closing = csource.find('}', endpos)
@@ -302,7 +312,7 @@
break
#
try:
- self._inside_extern_python = False
+ self._inside_extern_python = '__cffi_extern_python_stop'
for decl in iterator:
if isinstance(decl, pycparser.c_ast.Decl):
self._parse_decl(decl)
@@ -376,8 +386,10 @@
tp = self._get_type_pointer(tp, quals)
if self._options.get('dllexport'):
tag = 'dllexport_python '
- elif self._inside_extern_python:
+ elif self._inside_extern_python == '__cffi_extern_python_start':
tag = 'extern_python '
+ elif self._inside_extern_python == '__cffi_extern_python_plus_c_start':
+ tag = 'extern_python_plus_c '
else:
tag = 'function '
self._declare(tag + decl.name, tp)
@@ -421,11 +433,9 @@
# hack: `extern "Python"` in the C source is replaced
# with "void __cffi_extern_python_start;" and
# "void __cffi_extern_python_stop;"
- self._inside_extern_python = not self._inside_extern_python
- assert self._inside_extern_python == (
- decl.name == '__cffi_extern_python_start')
+ self._inside_extern_python = decl.name
else:
- if self._inside_extern_python:
+ if self._inside_extern_python !='__cffi_extern_python_stop':
raise api.CDefError(
"cannot declare constants or "
"variables with 'extern \"Python\"'")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -1145,11 +1145,11 @@
def _generate_cpy_extern_python_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
self._do_collect_type(tp)
+ _generate_cpy_dllexport_python_collecttype = \
+ _generate_cpy_extern_python_plus_c_collecttype = \
+ _generate_cpy_extern_python_collecttype
- def _generate_cpy_dllexport_python_collecttype(self, tp, name):
- self._generate_cpy_extern_python_collecttype(tp, name)
-
- def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False):
+ def _extern_python_decl(self, tp, name, tag_and_space):
prnt = self._prnt
if isinstance(tp.result, model.VoidType):
size_of_result = '0'
@@ -1184,11 +1184,7 @@
size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
tp.result.get_c_name(''), size_of_a,
tp.result.get_c_name(''), size_of_a)
- if dllexport:
- tag = 'CFFI_DLLEXPORT'
- else:
- tag = 'static'
- prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments)))
+ prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
prnt('{')
prnt(' char a[%s];' % size_of_a)
prnt(' char *p = a;')
@@ -1206,8 +1202,14 @@
prnt()
self._num_externpy += 1
+ def _generate_cpy_extern_python_decl(self, tp, name):
+ self._extern_python_decl(tp, name, 'static ')
+
def _generate_cpy_dllexport_python_decl(self, tp, name):
- self._generate_cpy_extern_python_decl(tp, name, dllexport=True)
+ self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
+
+ def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
+ self._extern_python_decl(tp, name, '')
def _generate_cpy_extern_python_ctx(self, tp, name):
if self.target_is_python:
@@ -1220,8 +1222,9 @@
self._lsts["global"].append(
GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
- def _generate_cpy_dllexport_python_ctx(self, tp, name):
- self._generate_cpy_extern_python_ctx(tp, name)
+ _generate_cpy_dllexport_python_ctx = \
+ _generate_cpy_extern_python_plus_c_ctx = \
+ _generate_cpy_extern_python_ctx
def _string_literal(self, s):
def _char_repr(c):
diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty
new file mode 100644
--- /dev/null
+++ b/lib_pypy/ctypes_config_cache/.empty
@@ -0,0 +1,1 @@
+dummy file to allow old buildbot configuration to run
diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py
deleted file mode 100644
diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/dumpcache.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import sys, os
-from ctypes_configure import dumpcache
-
-def dumpcache2(basename, config):
- size = 32 if sys.maxint <= 2**32 else 64
- filename = '_%s_%s_.py' % (basename, size)
- dumpcache.dumpcache(__file__, filename, config)
- #
- filename = os.path.join(os.path.dirname(__file__),
- '_%s_cache.py' % (basename,))
- g = open(filename, 'w')
- print >> g, '''\
-import sys
-_size = 32 if sys.maxsize <= 2**32 else 64
-# XXX relative import, should be removed together with
-# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib
-_mod = __import__("_%s_%%s_" %% (_size,),
- globals(), locals(), ["*"], level=1)
-globals().update(_mod.__dict__)\
-''' % (basename,)
- g.close()
diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/locale.ctc.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-'ctypes_configure' source for _locale.py.
-Run this to rebuild _locale_cache.py.
-"""
-
-from ctypes_configure.configure import (configure, ExternalCompilationInfo,
- ConstantInteger, DefinedConstantInteger, SimpleType, check_eci)
-import dumpcache
-
-# ____________________________________________________________
-
-_CONSTANTS = [
- 'LC_CTYPE',
- 'LC_TIME',
- 'LC_COLLATE',
- 'LC_MONETARY',
- 'LC_MESSAGES',
- 'LC_NUMERIC',
- 'LC_ALL',
- 'CHAR_MAX',
-]
-
-class LocaleConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['limits.h',
- 'locale.h'])
-for key in _CONSTANTS:
- setattr(LocaleConfigure, key, DefinedConstantInteger(key))
-
-config = configure(LocaleConfigure, noerr=True)
-for key, value in config.items():
- if value is None:
- del config[key]
- _CONSTANTS.remove(key)
-
-# ____________________________________________________________
-
-eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h'])
-HAS_LANGINFO = check_eci(eci)
-
-if HAS_LANGINFO:
- # list of all possible names
- langinfo_names = [
- "RADIXCHAR", "THOUSEP", "CRNCYSTR",
- "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR",
- "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT",
- "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT",
- ]
- for i in range(1, 8):
- langinfo_names.append("DAY_%d" % i)
- langinfo_names.append("ABDAY_%d" % i)
- for i in range(1, 13):
- langinfo_names.append("MON_%d" % i)
- langinfo_names.append("ABMON_%d" % i)
-
- class LanginfoConfigure:
- _compilation_info_ = eci
- nl_item = SimpleType('nl_item')
- for key in langinfo_names:
- setattr(LanginfoConfigure, key, DefinedConstantInteger(key))
-
- langinfo_config = configure(LanginfoConfigure)
- for key, value in langinfo_config.items():
- if value is None:
- del langinfo_config[key]
- langinfo_names.remove(key)
- config.update(langinfo_config)
- _CONSTANTS += langinfo_names
-
-# ____________________________________________________________
-
-config['ALL_CONSTANTS'] = tuple(_CONSTANTS)
-config['HAS_LANGINFO'] = HAS_LANGINFO
-dumpcache.dumpcache2('locale', config)
diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py
deleted file mode 100755
--- a/lib_pypy/ctypes_config_cache/rebuild.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env python
-# Run this script to rebuild all caches from the *.ctc.py files.
-
-import os, sys
-
-sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))
-
-import py
-
-_dirpath = os.path.dirname(__file__) or os.curdir
-
-from rpython.tool.ansi_print import AnsiLogger
-log = AnsiLogger("ctypes_config_cache")
-
-
-def rebuild_one(name):
- filename = os.path.join(_dirpath, name)
- d = {'__file__': filename}
- path = sys.path[:]
- try:
- sys.path.insert(0, _dirpath)
- execfile(filename, d)
- finally:
- sys.path[:] = path
-
-def try_rebuild():
- size = 32 if sys.maxint <= 2**32 else 64
- # remove the files '_*_size_.py'
- left = {}
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_%s_.py' % size) or
- p.endswith('_%s_.pyc' % size)):
- os.unlink(os.path.join(_dirpath, p))
- elif p.startswith('_') and (p.endswith('_.py') or
- p.endswith('_.pyc')):
- for i in range(2, len(p)-4):
- left[p[:i]] = True
- # remove the files '_*_cache.py' if there is no '_*_*_.py' left around
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_cache.py') or
- p.endswith('_cache.pyc')):
- if p[:-9] not in left:
- os.unlink(os.path.join(_dirpath, p))
- #
- for p in os.listdir(_dirpath):
- if p.endswith('.ctc.py'):
- try:
- rebuild_one(p)
- except Exception, e:
- log.ERROR("Running %s:\n %s: %s" % (
- os.path.join(_dirpath, p),
- e.__class__.__name__, e))
-
-
-if __name__ == '__main__':
- try_rebuild()
diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/resource.ctc.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-'ctypes_configure' source for resource.py.
-Run this to rebuild _resource_cache.py.
-"""
-
-
-from ctypes import sizeof
-import dumpcache
-from ctypes_configure.configure import (configure,
- ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger,
- SimpleType)
-
-
-_CONSTANTS = (
- 'RLIM_INFINITY',
- 'RLIM_NLIMITS',
-)
-_OPTIONAL_CONSTANTS = (
- 'RLIMIT_CPU',
- 'RLIMIT_FSIZE',
- 'RLIMIT_DATA',
- 'RLIMIT_STACK',
- 'RLIMIT_CORE',
- 'RLIMIT_RSS',
- 'RLIMIT_NPROC',
- 'RLIMIT_NOFILE',
- 'RLIMIT_OFILE',
- 'RLIMIT_MEMLOCK',
- 'RLIMIT_AS',
- 'RLIMIT_LOCKS',
- 'RLIMIT_SIGPENDING',
- 'RLIMIT_MSGQUEUE',
- 'RLIMIT_NICE',
- 'RLIMIT_RTPRIO',
- 'RLIMIT_VMEM',
-
- 'RUSAGE_BOTH',
- 'RUSAGE_SELF',
- 'RUSAGE_CHILDREN',
-)
-
-# Setup our configure
-class ResourceConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h'])
- rlim_t = SimpleType('rlim_t')
-for key in _CONSTANTS:
- setattr(ResourceConfigure, key, ConstantInteger(key))
-for key in _OPTIONAL_CONSTANTS:
- setattr(ResourceConfigure, key, DefinedConstantInteger(key))
-
-# Configure constants and types
-config = configure(ResourceConfigure)
-config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1
-optional_constants = []
-for key in _OPTIONAL_CONSTANTS:
- if config[key] is not None:
- optional_constants.append(key)
- else:
- del config[key]
-
-config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants)
-dumpcache.dumpcache2('resource', config)
diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py
--- a/lib_pypy/pwd.py
+++ b/lib_pypy/pwd.py
@@ -1,4 +1,4 @@
-# ctypes implementation: Victor Stinner, 2008-05-08
+# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08
"""
This module provides access to the Unix password database.
It is available on all Unix versions.
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -1,15 +1,8 @@
-import sys
-if sys.platform == 'win32':
- raise ImportError('resource module not available for win32')
+"""http://docs.python.org/library/resource"""
-# load the platform-specific cache made by running resource.ctc.py
-from ctypes_config_cache._resource_cache import *
-
-from ctypes_support import standard_c_lib as libc
-from ctypes_support import get_errno
-from ctypes import Structure, c_int, c_long, byref, POINTER
+from _resource_cffi import ffi, lib
from errno import EINVAL, EPERM
-import _structseq
+import _structseq, os
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
@@ -18,104 +11,37 @@
class error(Exception):
pass
+class struct_rusage(metaclass=_structseq.structseqtype):
+ """struct_rusage: Result from getrusage.
-# Read required libc functions
-_getrusage = libc.getrusage
-_getrlimit = libc.getrlimit
-_setrlimit = libc.setrlimit
-try:
- _getpagesize = libc.getpagesize
- _getpagesize.argtypes = ()
- _getpagesize.restype = c_int
-except AttributeError:
- from os import sysconf
- _getpagesize = None
+This object may be accessed either as a tuple of
+ (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt,
+ nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw)
+or via the attributes ru_utime, ru_stime, ru_maxrss, and so on."""
+ __metaclass__ = _structseq.structseqtype
-class timeval(Structure):
- _fields_ = (
- ("tv_sec", c_long),
- ("tv_usec", c_long),
- )
- def __str__(self):
- return "(%s, %s)" % (self.tv_sec, self.tv_usec)
+ ru_utime = _structseq.structseqfield(0, "user time used")
+ ru_stime = _structseq.structseqfield(1, "system time used")
+ ru_maxrss = _structseq.structseqfield(2, "max. resident set size")
+ ru_ixrss = _structseq.structseqfield(3, "shared memory size")
+ ru_idrss = _structseq.structseqfield(4, "unshared data size")
+ ru_isrss = _structseq.structseqfield(5, "unshared stack size")
+ ru_minflt = _structseq.structseqfield(6, "page faults not requiring I/O")
+ ru_majflt = _structseq.structseqfield(7, "page faults requiring I/O")
+ ru_nswap = _structseq.structseqfield(8, "number of swap outs")
+ ru_inblock = _structseq.structseqfield(9, "block input operations")
+ ru_oublock = _structseq.structseqfield(10, "block output operations")
+ ru_msgsnd = _structseq.structseqfield(11, "IPC messages sent")
+ ru_msgrcv = _structseq.structseqfield(12, "IPC messages received")
+ ru_nsignals = _structseq.structseqfield(13,"signals received")
+ ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches")
+ ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches")
- def __float__(self):
- return self.tv_sec + self.tv_usec/1000000.0
-
-class _struct_rusage(Structure):
- _fields_ = (
- ("ru_utime", timeval),
- ("ru_stime", timeval),
- ("ru_maxrss", c_long),
- ("ru_ixrss", c_long),
- ("ru_idrss", c_long),
- ("ru_isrss", c_long),
- ("ru_minflt", c_long),
- ("ru_majflt", c_long),
- ("ru_nswap", c_long),
- ("ru_inblock", c_long),
- ("ru_oublock", c_long),
- ("ru_msgsnd", c_long),
- ("ru_msgrcv", c_long),
- ("ru_nsignals", c_long),
- ("ru_nvcsw", c_long),
- ("ru_nivcsw", c_long),
- )
-
-_getrusage.argtypes = (c_int, POINTER(_struct_rusage))
-_getrusage.restype = c_int
-
-
-class struct_rusage(metaclass=_structseq.structseqtype):
- ru_utime = _structseq.structseqfield(0)
- ru_stime = _structseq.structseqfield(1)
- ru_maxrss = _structseq.structseqfield(2)
- ru_ixrss = _structseq.structseqfield(3)
- ru_idrss = _structseq.structseqfield(4)
- ru_isrss = _structseq.structseqfield(5)
- ru_minflt = _structseq.structseqfield(6)
- ru_majflt = _structseq.structseqfield(7)
- ru_nswap = _structseq.structseqfield(8)
- ru_inblock = _structseq.structseqfield(9)
- ru_oublock = _structseq.structseqfield(10)
- ru_msgsnd = _structseq.structseqfield(11)
- ru_msgrcv = _structseq.structseqfield(12)
- ru_nsignals = _structseq.structseqfield(13)
- ru_nvcsw = _structseq.structseqfield(14)
- ru_nivcsw = _structseq.structseqfield(15)
-
- at builtinify
-def rlimit_check_bounds(rlim_cur, rlim_max):
- if rlim_cur > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_cur)
- if rlim_max > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_max)
-
-class rlimit(Structure):
- _fields_ = (
- ("rlim_cur", rlim_t),
- ("rlim_max", rlim_t),
- )
-
-_getrlimit.argtypes = (c_int, POINTER(rlimit))
-_getrlimit.restype = c_int
-_setrlimit.argtypes = (c_int, POINTER(rlimit))
-_setrlimit.restype = c_int
-
-
- at builtinify
-def getrusage(who):
- ru = _struct_rusage()
- ret = _getrusage(who, byref(ru))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- raise ValueError("invalid who parameter")
- raise error(errno)
+def _make_struct_rusage(ru):
return struct_rusage((
- float(ru.ru_utime),
- float(ru.ru_stime),
+ lib.my_utime(ru),
+ lib.my_stime(ru),
ru.ru_maxrss,
ru.ru_ixrss,
ru.ru_idrss,
@@ -133,48 +59,59 @@
))
@builtinify
+def getrusage(who):
+ ru = ffi.new("struct rusage *")
+ if lib.getrusage(who, ru) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("invalid who parameter")
+ raise error(ffi.errno)
+ return _make_struct_rusage(ru)
+
+ at builtinify
def getrlimit(resource):
- if not(0 <= resource < RLIM_NLIMITS):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlim = rlimit()
- ret = _getrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- raise error(errno)
- return (rlim.rlim_cur, rlim.rlim_max)
+ result = ffi.new("long long[2]")
+ if lib.my_getrlimit(resource, result) == -1:
+ raise error(ffi.errno)
+ return (result[0], result[1])
@builtinify
-def setrlimit(resource, rlim):
- if not(0 <= resource < RLIM_NLIMITS):
+def setrlimit(resource, limits):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlimit_check_bounds(*rlim)
- rlim = rlimit(rlim[0], rlim[1])
- ret = _setrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- return ValueError("current limit exceeds maximum limit")
- elif errno == EPERM:
- return ValueError("not allowed to raise maximum limit")
+ limits = tuple(limits)
+ if len(limits) != 2:
+ raise ValueError("expected a tuple of 2 integers")
+
+ if lib.my_setrlimit(resource, limits[0], limits[1]) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("current limit exceeds maximum limit")
+ elif ffi.errno == EPERM:
+ raise ValueError("not allowed to raise maximum limit")
else:
- raise error(errno)
+ raise error(ffi.errno)
+
@builtinify
def getpagesize():
- if _getpagesize:
- return _getpagesize()
- else:
- try:
- return sysconf("SC_PAGE_SIZE")
- except ValueError:
- # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE
- return sysconf("SC_PAGESIZE")
+ return os.sysconf("SC_PAGESIZE")
-__all__ = ALL_CONSTANTS + (
- 'error', 'timeval', 'struct_rusage', 'rlimit',
- 'getrusage', 'getrlimit', 'setrlimit', 'getpagesize',
+
+def _setup():
+ all_constants = []
+ p = lib.my_rlimit_consts
+ while p.name:
+ name = ffi.string(p.name)
+ globals()[name] = int(p.value)
+ all_constants.append(name)
+ p += 1
+ return all_constants
+
+__all__ = tuple(_setup()) + (
+ 'error', 'getpagesize', 'struct_rusage',
+ 'getrusage', 'getrlimit', 'setrlimit',
)
-
-del ALL_CONSTANTS
+del _setup
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -205,15 +205,6 @@
BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
default=False),
- BoolOption("withprebuiltchar",
- "use prebuilt single-character string objects",
- default=False),
-
- BoolOption("sharesmallstr",
- "always reuse the prebuilt string objects "
- "(the empty string and potentially single-char strings)",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
@@ -223,34 +214,14 @@
default=False,
requires=[("objspace.honor__builtins__", False)]),
- BoolOption("withmapdict",
- "make instances really small but slow without the JIT",
- default=False,
- requires=[("objspace.std.getattributeshortcut", True),
- ("objspace.std.withtypeversion", True),
- ]),
-
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
- BoolOption("withtypeversion",
- "version type objects when changing them",
- cmdline=None,
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
-
- BoolOption("withmethodcache",
- "try to cache method lookups",
- default=False,
- requires=[("objspace.std.withtypeversion", True),
- ("translation.rweakref", True)]),
BoolOption("withmethodcachecounter",
"try to cache methods and provide a counter in __pypy__. "
"for testing purposes only.",
- default=False,
- requires=[("objspace.std.withmethodcache", True)]),
+ default=False),
IntOption("methodcachesizeexp",
" 2 ** methodcachesizeexp is the size of the of the method cache ",
default=11),
@@ -261,22 +232,10 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("getattributeshortcut",
- "track types that override __getattribute__",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
BoolOption("newshortcut",
"cache and shortcut calling __new__ from builtin types",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
+ default=False),
- BoolOption("withidentitydict",
- "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
]),
])
@@ -292,14 +251,10 @@
"""
# all the good optimizations for PyPy should be listed here
if level in ['2', '3', 'jit']:
- config.objspace.std.suggest(withmethodcache=True)
- config.objspace.std.suggest(withprebuiltchar=True)
config.objspace.std.suggest(intshortcut=True)
config.objspace.std.suggest(optimized_list_getitem=True)
- config.objspace.std.suggest(getattributeshortcut=True)
#config.objspace.std.suggest(newshortcut=True)
config.objspace.std.suggest(withspecialisedtuple=True)
- config.objspace.std.suggest(withidentitydict=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
@@ -312,15 +267,13 @@
# memory-saving optimizations
if level == 'mem':
config.objspace.std.suggest(withprebuiltint=True)
- config.objspace.std.suggest(withprebuiltchar=True)
- config.objspace.std.suggest(withmapdict=True)
+ config.objspace.std.suggest(withliststrategies=True)
if not IS_64_BITS:
config.objspace.std.suggest(withsmalllong=True)
# extra optimizations with the JIT
if level == 'jit':
config.objspace.std.suggest(withcelldict=True)
- config.objspace.std.suggest(withmapdict=True)
def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
assert conf.objspace.usemodules.gc
- conf.objspace.std.withmapdict = True
- assert conf.objspace.std.withtypeversion
- conf = get_pypy_config()
- conf.objspace.std.withtypeversion = False
- py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
def test_conflicting_gcrootfinder():
conf = get_pypy_config()
conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
def test_set_pypy_opt_level():
conf = get_pypy_config()
set_pypy_opt_level(conf, '2')
- assert conf.objspace.std.getattributeshortcut
+ assert conf.objspace.std.intshortcut
conf = get_pypy_config()
set_pypy_opt_level(conf, '0')
- assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
- conf = get_pypy_config()
- conf.translation.rweakref = False
- set_pypy_opt_level(conf, '3')
-
- assert not conf.objspace.std.withtypeversion
- assert not conf.objspace.std.withmethodcache
+ assert not conf.objspace.std.intshortcut
def test_check_documentation():
def check_file_exists(fn):
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -102,15 +102,15 @@
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
- tk-dev
+ tk-dev libgc-dev
For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
On Fedora::
- yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
- lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
- (XXX plus the Febora version of libgdbm-dev and tk-dev)
+ dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+ gdbm-devel
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
The work on the cling backend has so far been done only for CPython, but
bringing it to PyPy is a lot less work than developing it in the first place.
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
.. _llvm: http://llvm.org/
.. _clang: http://clang.llvm.org/
@@ -283,7 +283,8 @@
core reflection set, but for the moment assume we want to have it in the
reflection library that we are building for this example.
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries")
which is a simple XML file specifying, either explicitly or by using a
pattern, which classes, variables, namespaces, etc. to select from the given
header file.
@@ -305,7 +306,7 @@
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
Now the reflection info can be generated and compiled::
@@ -811,7 +812,7 @@
immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
variable.
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
There are a couple of minor differences between PyCintex and cppyy, most to do
with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts `
+:source:`pypy/goal/` our main PyPy-translation scripts
live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
discussion/improve-rpython
discussion/ctypes-implementation
discussion/jit-profiler
+ discussion/rawrefcount
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -79,7 +79,7 @@
:doc:`Full details ` are `available here `.
.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
RPython Mixed Modules
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,20 +106,33 @@
For information on which third party extensions work (or do not work)
with PyPy see the `compatibility wiki`_.
+For more information about how we manage refcounting semantics see
+rawrefcount_
+
.. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
.. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html
On which platforms does PyPy run?
---------------------------------
-PyPy is regularly and extensively tested on Linux machines. It mostly
+PyPy currently supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+PyPy is regularly and extensively tested on Linux machines. It
works on Mac and Windows: it is tested there, but most of us are running
-Linux so fixes may depend on 3rd-party contributions. PyPy's JIT
-works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7).
-Support for POWER (64-bit) is stalled at the moment.
+Linux so fixes may depend on 3rd-party contributions.
-To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or
+To bootstrap from sources, PyPy can use either CPython 2.7 or
another (e.g. older) PyPy. Cross-translation is not really supported:
e.g. to build a 32-bit PyPy, you need to have a 32-bit environment.
Cross-translation is only explicitly supported between a 32-bit Intel
diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
Dictionary Optimizations
~~~~~~~~~~~~~~~~~~~~~~~~
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
-Multi-dicts are a special implementation of dictionaries. It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime. Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that make it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled, by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
-This is now the default implementation of dictionaries in the Python interpreter.
+Identity Dicts
++++++++++++++++
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with
+new-style classes.
+
+
+Map Dicts
+++++++++++++
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually memory behaviour
+should be mostly like that of using ``__slots__``).
The idea is the following: Most instances of the same class have very similar
attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
dicts:
the representation of the instance dict contains only a list of values.
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
User Class Optimizations
@@ -114,8 +120,7 @@
base classes is changed). On subsequent lookups the cached version can be used,
as long as the instance did not shadow any of its classes attributes.
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
Interpreter Optimizations
diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst
--- a/pypy/doc/introduction.rst
+++ b/pypy/doc/introduction.rst
@@ -1,16 +1,22 @@
What is PyPy?
=============
-In common parlance, PyPy has been used to mean two things. The first is the
-:ref:`RPython translation toolchain `, which is a framework for generating
-dynamic programming language implementations. And the second is one
-particular implementation that is so generated --
-an implementation of the Python_ programming language written in
-Python itself. It is designed to be flexible and easy to experiment with.
+Historically, PyPy has been used to mean two things. The first is the
+:ref:`RPython translation toolchain ` for generating
+interpreters for dynamic programming languages. And the second is one
+particular implementation of Python_ produced with it. Because RPython
+uses the same syntax as Python, this generated version became known as
+Python interpreter written in Python. It is designed to be flexible and
+easy to experiment with.
-This double usage has proven to be confusing, and we are trying to move
-away from using the word PyPy to mean both things. From now on we will
-try to use PyPy to only mean the Python implementation, and say the
+To make it more clear, we start with source code written in RPython,
+apply the RPython translation toolchain, and end up with PyPy as a
+binary executable. This executable is the Python interpreter.
+
+Double usage has proven to be confusing, so we've moved away from using
+the word PyPy to mean both toolchain and generated interpreter. Now we
+use the word PyPy to refer to the Python implementation, and explicitly
+mention
:ref:`RPython translation toolchain ` when we mean the framework.
Some older documents, presentations, papers and videos will still have the old
diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst
--- a/pypy/doc/release-5.1.0.rst
+++ b/pypy/doc/release-5.1.0.rst
@@ -3,10 +3,17 @@
========
We have released PyPy 5.1, about a month after PyPy 5.0.
-We encourage all users of PyPy to update to this version. Apart from the usual
-bug fixes, there is an ongoing effort to improve the warmup time and memory
-usage of JIT-related metadata, and we now fully support the IBM s390x
-architecture.
+
+This release includes more improvements to warmup time and memory
+requirements. We have seen about a 20% memory requirement reduction and up to
+30% warmup time improvement, more detail in the `blog post`_.
+
+We also now have `fully support for the IBM s390x`_. Since this support is in
+`RPython`_, any dynamic language written using RPython, like PyPy, will
+automagically be supported on that architecture.
+
+We updated cffi_ to 1.6, and continue to improve support for the wider
+python ecosystem using the PyPy interpreter.
You can download the PyPy 5.1 release here:
@@ -26,6 +33,9 @@
.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
.. _`numpy`: https://bitbucket.org/pypy/numpy
+.. _cffi: https://cffi.readthedocs.org
+.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html
+.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html
What is PyPy?
=============
@@ -46,7 +56,7 @@
* big- and little-endian variants of **PPC64** running Linux,
- * **s960x** running Linux
+ * **s390x** running Linux
.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
.. _`dynamic languages`: http://pypyjs.org
@@ -74,6 +84,8 @@
* Fix a corner case in the JIT
* Fix edge cases in the cpyext refcounting-compatible semantics
+ (more work on cpyext compatibility is coming in the ``cpyext-ext``
+ branch, but isn't ready yet)
* Try harder to not emit NEON instructions on ARM processors without NEON
support
@@ -92,11 +104,17 @@
* Fix sandbox startup (a regression in 5.0)
+ * Fix possible segfault for classes with mangled mro or __metaclass__
+
+ * Fix isinstance(deque(), Hashable) on the pure python deque
+
+ * Fix an issue with forkpty()
+
* Issues reported with our previous release were resolved_ after reports from users on
our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
#pypy
-* Numpy:
+* Numpy_:
* Implemented numpy.where for a single argument
@@ -108,6 +126,8 @@
functions exported from libpypy.so are declared in pypy_numpy.h, which is
included only when building our fork of numpy
+ * Add broadcast
+
* Performance improvements:
* Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting
@@ -119,14 +139,18 @@
* Remove the forced minor collection that occurs when rewriting the
assembler at the start of the JIT backend
+ * Port the resource module to cffi
+
* Internal refactorings:
* Use a simpler logger to speed up translation
* Drop vestiges of Python 2.5 support in testing
+ * Update rpython functions with ones needed for py3k
+
.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html
-.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html
+.. _Numpy: https://bitbucket.org/pypy/numpy
Please update, and continue to help us make PyPy better.
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released a bugfix for PyPy 5.1, due to a regression_ in
+installing third-party packages dependent on numpy (using our numpy fork
+available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy which increased the memory required to translate. Improvement
+will be noticed by downstream packagers and those who translate rather than
+download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst
--- a/pypy/doc/whatsnew-5.1.0.rst
+++ b/pypy/doc/whatsnew-5.1.0.rst
@@ -60,3 +60,13 @@
Remove old unneeded numpy headers, what is left is only for testing. Also
generate pypy_numpy.h which exposes functions to directly use micronumpy
ndarray and ufuncs
+
+.. branch: rposix-for-3
+
+Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
+This updates the underlying rpython functions with the ones needed for the
+py3k branch
+
+.. branch: numpy_broadcast
+
+Add broadcast to micronumpy
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,14 +3,61 @@
=========================
.. this is a revision shortly after release-5.1
-.. startrev: 2180e1eaf6f6
+.. startrev: aa60332382a1
-.. branch: rposix-for-3
+.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046
-Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
-This updates the underlying rpython functions with the ones needed for the
-py3k branch
-
-.. branch: numpy_broadcast
+.. branch: gcheader-decl
-Add broadcast to micronumpy
+Reduce the size of generated C sources.
+
+
+.. branch: remove-objspace-options
+
+Remove a number of options from the build process that were never tested and
+never set. Fix a performance bug in the method cache.
+
+.. branch: bitstring
+
+JIT: use bitstrings to compress the lists of read or written descrs
+that we attach to EffectInfo. Fixes a problem we had in
+remove-objspace-options.
+
+.. branch: cpyext-for-merge
+
+Update cpyext C-API support After this branch, we are almost able to support
+upstream numpy via cpyext, so we created (yet another) fork of numpy at
+github.com/pypy/numpy with the needed changes. Among the significant changes
+to cpyext:
+ - allow c-snippet tests to be run with -A so we can verify we are compatible
+ - fix many edge cases exposed by fixing tests to run with -A
+ - issequence() logic matches cpython
+ - make PyStringObject and PyUnicodeObject field names compatible with cpython
+ - add preliminary support for PyDateTime_*
+ - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy,
+ PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile,
+ - PyAnySet_CheckExact, PyUnicode_Concat
+ - improve support for PyGILState_Ensure, PyGILState_Release, and thread
+ primitives, also find a case where CPython will allow thread creation
+ before PyEval_InitThreads is run, disallow on PyPy
+ - create a PyObject-specific list strategy
+ - rewrite slot assignment for typeobjects
+ - improve tracking of PyObject to rpython object mapping
+ - support tp_as_{number, sequence, mapping, buffer} slots
+
+(makes the pypy-c bigger; this was fixed subsequently by the
+share-cpyext-cpython-api branch)
+
+.. branch: share-mapdict-methods-2
+
+Reduce generated code for subclasses by using the same function objects in all
+generated subclasses.
+
+.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL. Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -344,10 +344,6 @@
return PyPyJitPolicy(pypy_hooks)
def get_entry_point(self, config):
- from pypy.tool.lib_pypy import import_from_lib_pypy
- rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild')
- rebuild.try_rebuild()
-
space = make_objspace(config)
# manually imports app_main.py
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -87,7 +87,11 @@
"""
try:
# run it
- f(*fargs, **fkwds)
+ try:
+ f(*fargs, **fkwds)
+ finally:
+ sys.settrace(None)
+ sys.setprofile(None)
except SystemExit as e:
handle_sys_exit(e)
except BaseException as e:
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -53,24 +53,24 @@
n = self.root_node
if n.type == syms.file_input:
stmts = []
- for i in range(len(n.children) - 1):
- stmt = n.children[i]
+ for i in range(n.num_children() - 1):
+ stmt = n.get_child(i)
if stmt.type == tokens.NEWLINE:
continue
sub_stmts_count = self.number_of_statements(stmt)
if sub_stmts_count == 1:
stmts.append(self.handle_stmt(stmt))
else:
- stmt = stmt.children[0]
+ stmt = stmt.get_child(0)
for j in range(sub_stmts_count):
- small_stmt = stmt.children[j * 2]
+ small_stmt = stmt.get_child(j * 2)
stmts.append(self.handle_stmt(small_stmt))
return ast.Module(stmts)
elif n.type == syms.eval_input:
- body = self.handle_testlist(n.children[0])
+ body = self.handle_testlist(n.get_child(0))
return ast.Expression(body)
elif n.type == syms.single_input:
- first_child = n.children[0]
+ first_child = n.get_child(0)
if first_child.type == tokens.NEWLINE:
# An empty line.
return ast.Interactive([])
@@ -80,8 +80,8 @@
stmts = [self.handle_stmt(first_child)]
else:
stmts = []
- for i in range(0, len(first_child.children), 2):
- stmt = first_child.children[i]
+ for i in range(0, first_child.num_children(), 2):
+ stmt = first_child.get_child(i)
if stmt.type == tokens.NEWLINE:
break
stmts.append(self.handle_stmt(stmt))
@@ -95,16 +95,16 @@
if stmt_type == syms.compound_stmt:
return 1
elif stmt_type == syms.stmt:
- return self.number_of_statements(n.children[0])
+ return self.number_of_statements(n.get_child(0))
elif stmt_type == syms.simple_stmt:
# Divide to remove semi-colons.
- return len(n.children) // 2
+ return n.num_children() // 2
else:
raise AssertionError("non-statement node")
def error(self, msg, n):
"""Raise a SyntaxError with the lineno and column set to n's."""
- raise SyntaxError(msg, n.lineno, n.column,
+ raise SyntaxError(msg, n.get_lineno(), n.get_column(),
filename=self.compile_info.filename)
def error_ast(self, msg, ast_node):
@@ -130,34 +130,34 @@
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_del_stmt(self, del_node):
- targets = self.handle_exprlist(del_node.children[1], ast.Del)
- return ast.Delete(targets, del_node.lineno, del_node.column)
+ targets = self.handle_exprlist(del_node.get_child(1), ast.Del)
+ return ast.Delete(targets, del_node.get_lineno(), del_node.get_column())
def handle_flow_stmt(self, flow_node):
- first_child = flow_node.children[0]
+ first_child = flow_node.get_child(0)
first_child_type = first_child.type
if first_child_type == syms.break_stmt:
- return ast.Break(flow_node.lineno, flow_node.column)
+ return ast.Break(flow_node.get_lineno(), flow_node.get_column())
elif first_child_type == syms.continue_stmt:
- return ast.Continue(flow_node.lineno, flow_node.column)
+ return ast.Continue(flow_node.get_lineno(), flow_node.get_column())
elif first_child_type == syms.yield_stmt:
- yield_expr = self.handle_expr(first_child.children[0])
- return ast.Expr(yield_expr, flow_node.lineno, flow_node.column)
+ yield_expr = self.handle_expr(first_child.get_child(0))
+ return ast.Expr(yield_expr, flow_node.get_lineno(), flow_node.get_column())
elif first_child_type == syms.return_stmt:
- if len(first_child.children) == 1:
+ if first_child.num_children() == 1:
values = None
else:
- values = self.handle_testlist(first_child.children[1])
- return ast.Return(values, flow_node.lineno, flow_node.column)
+ values = self.handle_testlist(first_child.get_child(1))
+ return ast.Return(values, flow_node.get_lineno(), flow_node.get_column())
elif first_child_type == syms.raise_stmt:
exc = None
cause = None
- child_count = len(first_child.children)
+ child_count = first_child.num_children()
if child_count >= 2:
- exc = self.handle_expr(first_child.children[1])
+ exc = self.handle_expr(first_child.get_child(1))
if child_count >= 4:
- cause = self.handle_expr(first_child.children[3])
- return ast.Raise(exc, cause, flow_node.lineno, flow_node.column)
+ cause = self.handle_expr(first_child.get_child(3))
+ return ast.Raise(exc, cause, flow_node.get_lineno(), flow_node.get_column())
else:
raise AssertionError("unknown flow statement")
@@ -165,33 +165,33 @@
while True:
import_name_type = import_name.type
if import_name_type == syms.import_as_name:
- name = self.new_identifier(import_name.children[0].value)
- if len(import_name.children) == 3:
+ name = self.new_identifier(import_name.get_child(0).get_value())
+ if import_name.num_children() == 3:
as_name = self.new_identifier(
- import_name.children[2].value)
- self.check_forbidden_name(as_name, import_name.children[2])
+ import_name.get_child(2).get_value())
+ self.check_forbidden_name(as_name, import_name.get_child(2))
else:
as_name = None
- self.check_forbidden_name(name, import_name.children[0])
+ self.check_forbidden_name(name, import_name.get_child(0))
return ast.alias(name, as_name)
elif import_name_type == syms.dotted_as_name:
- if len(import_name.children) == 1:
- import_name = import_name.children[0]
+ if import_name.num_children() == 1:
+ import_name = import_name.get_child(0)
continue
- alias = self.alias_for_import_name(import_name.children[0],
+ alias = self.alias_for_import_name(import_name.get_child(0),
store=False)
- asname_node = import_name.children[2]
- alias.asname = self.new_identifier(asname_node.value)
+ asname_node = import_name.get_child(2)
+ alias.asname = self.new_identifier(asname_node.get_value())
self.check_forbidden_name(alias.asname, asname_node)
return alias
elif import_name_type == syms.dotted_name:
- if len(import_name.children) == 1:
- name = self.new_identifier(import_name.children[0].value)
+ if import_name.num_children() == 1:
+ name = self.new_identifier(import_name.get_child(0).get_value())
if store:
- self.check_forbidden_name(name, import_name.children[0])
+ self.check_forbidden_name(name, import_name.get_child(0))
return ast.alias(name, None)
- name_parts = [import_name.children[i].value
- for i in range(0, len(import_name.children), 2)]
+ name_parts = [import_name.get_child(i).get_value()
+ for i in range(0, import_name.num_children(), 2)]
name = ".".join(name_parts)
return ast.alias(name, None)
elif import_name_type == tokens.STAR:
@@ -200,20 +200,20 @@
raise AssertionError("unknown import name")
def handle_import_stmt(self, import_node):
- import_node = import_node.children[0]
+ import_node = import_node.get_child(0)
if import_node.type == syms.import_name:
- dotted_as_names = import_node.children[1]
- aliases = [self.alias_for_import_name(dotted_as_names.children[i])
- for i in range(0, len(dotted_as_names.children), 2)]
- return ast.Import(aliases, import_node.lineno, import_node.column)
+ dotted_as_names = import_node.get_child(1)
+ aliases = [self.alias_for_import_name(dotted_as_names.get_child(i))
+ for i in range(0, dotted_as_names.num_children(), 2)]
+ return ast.Import(aliases, import_node.get_lineno(), import_node.get_column())
elif import_node.type == syms.import_from:
- child_count = len(import_node.children)
+ child_count = import_node.num_children()
module = None
modname = None
i = 1
dot_count = 0
while i < child_count:
- child = import_node.children[i]
+ child = import_node.get_child(i)
child_type = child.type
if child_type == syms.dotted_name:
module = self.alias_for_import_name(child, False)
@@ -227,16 +227,16 @@
i += 1
dot_count += 1
i += 1
- after_import_type = import_node.children[i].type
+ after_import_type = import_node.get_child(i).type
star_import = False
if after_import_type == tokens.STAR:
- names_node = import_node.children[i]
+ names_node = import_node.get_child(i)
star_import = True
elif after_import_type == tokens.LPAR:
- names_node = import_node.children[i + 1]
+ names_node = import_node.get_child(i + 1)
elif after_import_type == syms.import_as_names:
- names_node = import_node.children[i]
- if len(names_node.children) % 2 == 0:
+ names_node = import_node.get_child(i)
+ if names_node.num_children() % 2 == 0:
self.error("trailing comma is only allowed with "
"surronding parenthesis", names_node)
else:
@@ -244,307 +244,308 @@
if star_import:
aliases = [self.alias_for_import_name(names_node)]
else:
- aliases = [self.alias_for_import_name(names_node.children[i])
- for i in range(0, len(names_node.children), 2)]
+ aliases = [self.alias_for_import_name(names_node.get_child(i))
+ for i in range(0, names_node.num_children(), 2)]
if module is not None:
modname = module.name
return ast.ImportFrom(modname, aliases, dot_count,
- import_node.lineno, import_node.column)
+ import_node.get_lineno(), import_node.get_column())
else:
raise AssertionError("unknown import node")
def handle_global_stmt(self, global_node):
- names = [self.new_identifier(global_node.children[i].value)
- for i in range(1, len(global_node.children), 2)]
- return ast.Global(names, global_node.lineno, global_node.column)
+ names = [self.new_identifier(global_node.get_child(i).get_value())
+ for i in range(1, global_node.num_children(), 2)]
+ return ast.Global(names, global_node.get_lineno(), global_node.get_column())
def handle_nonlocal_stmt(self, nonlocal_node):
- names = [self.new_identifier(nonlocal_node.children[i].value)
- for i in range(1, len(nonlocal_node.children), 2)]
- return ast.Nonlocal(names, nonlocal_node.lineno, nonlocal_node.column)
+ names = [self.new_identifier(nonlocal_node.get_child(i).get_value())
+ for i in range(1, nonlocal_node.num_children(), 2)]
+ return ast.Nonlocal(names, nonlocal_node.get_lineno(), nonlocal_node.get_column())
def handle_assert_stmt(self, assert_node):
- expr = self.handle_expr(assert_node.children[1])
+ expr = self.handle_expr(assert_node.get_child(1))
msg = None
- if len(assert_node.children) == 4:
- msg = self.handle_expr(assert_node.children[3])
- return ast.Assert(expr, msg, assert_node.lineno, assert_node.column)
+ if assert_node.num_children() == 4:
+ msg = self.handle_expr(assert_node.get_child(3))
+ return ast.Assert(expr, msg, assert_node.get_lineno(), assert_node.get_column())
def handle_suite(self, suite_node):
- first_child = suite_node.children[0]
+ first_child = suite_node.get_child(0)
if first_child.type == syms.simple_stmt:
- end = len(first_child.children) - 1
- if first_child.children[end - 1].type == tokens.SEMI:
+ end = first_child.num_children() - 1
+ if first_child.get_child(end - 1).type == tokens.SEMI:
end -= 1
- stmts = [self.handle_stmt(first_child.children[i])
+ stmts = [self.handle_stmt(first_child.get_child(i))
for i in range(0, end, 2)]
else:
stmts = []
- for i in range(2, len(suite_node.children) - 1):
- stmt = suite_node.children[i]
+ for i in range(2, suite_node.num_children() - 1):
+ stmt = suite_node.get_child(i)
stmt_count = self.number_of_statements(stmt)
if stmt_count == 1:
stmts.append(self.handle_stmt(stmt))
else:
- simple_stmt = stmt.children[0]
- for j in range(0, len(simple_stmt.children), 2):
- stmt = simple_stmt.children[j]
- if not stmt.children:
+ simple_stmt = stmt.get_child(0)
+ for j in range(0, simple_stmt.num_children(), 2):
+ stmt = simple_stmt.get_child(j)
+ if not stmt.num_children():
break
stmts.append(self.handle_stmt(stmt))
return stmts
def handle_if_stmt(self, if_node):
- child_count = len(if_node.children)
+ child_count = if_node.num_children()
if child_count == 4:
- test = self.handle_expr(if_node.children[1])
- suite = self.handle_suite(if_node.children[3])
- return ast.If(test, suite, None, if_node.lineno, if_node.column)
- otherwise_string = if_node.children[4].value
+ test = self.handle_expr(if_node.get_child(1))
+ suite = self.handle_suite(if_node.get_child(3))
+ return ast.If(test, suite, None, if_node.get_lineno(), if_node.get_column())
+ otherwise_string = if_node.get_child(4).get_value()
if otherwise_string == "else":
- test = self.handle_expr(if_node.children[1])
- suite = self.handle_suite(if_node.children[3])
- else_suite = self.handle_suite(if_node.children[6])
- return ast.If(test, suite, else_suite, if_node.lineno,
- if_node.column)
+ test = self.handle_expr(if_node.get_child(1))
+ suite = self.handle_suite(if_node.get_child(3))
+ else_suite = self.handle_suite(if_node.get_child(6))
+ return ast.If(test, suite, else_suite, if_node.get_lineno(),
+ if_node.get_column())
elif otherwise_string == "elif":
elif_count = child_count - 4
- after_elif = if_node.children[elif_count + 1]
+ after_elif = if_node.get_child(elif_count + 1)
if after_elif.type == tokens.NAME and \
- after_elif.value == "else":
+ after_elif.get_value() == "else":
has_else = True
elif_count -= 3
else:
has_else = False
elif_count /= 4
if has_else:
- last_elif = if_node.children[-6]
+ last_elif = if_node.get_child(-6)
last_elif_test = self.handle_expr(last_elif)
- elif_body = self.handle_suite(if_node.children[-4])
- else_body = self.handle_suite(if_node.children[-1])
+ elif_body = self.handle_suite(if_node.get_child(-4))
+ else_body = self.handle_suite(if_node.get_child(-1))
otherwise = [ast.If(last_elif_test, elif_body, else_body,
- last_elif.lineno, last_elif.column)]
+ last_elif.get_lineno(), last_elif.get_column())]
elif_count -= 1
else:
otherwise = None
From pypy.commits at gmail.com Mon May 2 00:47:10 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 21:47:10 -0700 (PDT)
Subject: [pypy-commit] pypy default: fix error message
Message-ID: <5726dbce.d81a1c0a.5dcde.4415@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84113:ad27d9cf6f2d
Date: 2016-05-01 21:42 -0700
http://bitbucket.org/pypy/pypy/changeset/ad27d9cf6f2d/
Log: fix error message
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -436,7 +436,7 @@
s = capi.c_resolve_name(self.space, s)
if s != self.templ_args[i]:
raise OperationError(self.space.w_TypeError, self.space.wrap(
- "non-matching template (got %s where %s expected" % (s, self.templ_args[i])))
+ "non-matching template (got %s where %s expected)" % (s, self.templ_args[i])))
return W_CPPBoundMethod(cppthis, self)
def bound_call(self, cppthis, args_w):
From pypy.commits at gmail.com Mon May 2 00:47:12 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 21:47:12 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: merge default
Message-ID: <5726dbd0.89cbc20a.a5dd1.1859@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84114:796445937161
Date: 2016-05-01 21:43 -0700
http://bitbucket.org/pypy/pypy/changeset/796445937161/
Log: merge default
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -436,7 +436,7 @@
s = capi.c_resolve_name(self.space, s)
if s != self.templ_args[i]:
raise OperationError(self.space.w_TypeError, self.space.wrap(
- "non-matching template (got %s where %s expected" % (s, self.templ_args[i])))
+ "non-matching template (got %s where %s expected)" % (s, self.templ_args[i])))
return W_CPPBoundMethod(cppthis, self)
def bound_call(self, cppthis, args_w):
diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py
--- a/pypy/objspace/std/newformat.py
+++ b/pypy/objspace/std/newformat.py
@@ -568,7 +568,7 @@
msg = "Sign not allowed in string format specifier"
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._alternate:
- msg = "Alternate form not allowed in string format specifier"
+ msg = "Alternate form (#) not allowed in string format specifier"
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._align == "=":
msg = "'=' alignment not allowed in string format specifier"
From pypy.commits at gmail.com Mon May 2 00:47:08 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 21:47:08 -0700 (PDT)
Subject: [pypy-commit] pypy default: match cpython error messages
Message-ID: <5726dbcc.109a1c0a.25620.3556@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84112:a0ed295ccdc9
Date: 2016-05-01 21:41 -0700
http://bitbucket.org/pypy/pypy/changeset/a0ed295ccdc9/
Log: match cpython error messages
diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py
--- a/pypy/objspace/std/newformat.py
+++ b/pypy/objspace/std/newformat.py
@@ -560,7 +560,7 @@
msg = "Sign not allowed in string format specifier"
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._alternate:
- msg = "Alternate form not allowed in string format specifier"
+ msg = "Alternate form (#) not allowed in string format specifier"
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._align == "=":
msg = "'=' alignment not allowed in string format specifier"
@@ -920,7 +920,7 @@
flags = 0
default_precision = 6
if self._alternate:
- msg = "alternate form not allowed in float formats"
+ msg = "Alternate form (#) not allowed in float formats"
raise OperationError(space.w_ValueError, space.wrap(msg))
tp = self._type
self._get_locale(tp)
@@ -998,9 +998,9 @@
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._alternate:
#alternate is invalid
- msg = "Alternate form %s not allowed in complex format specifier"
+ msg = "Alternate form (#) not allowed in complex format specifier"
raise OperationError(space.w_ValueError,
- space.wrap(msg % (self._alternate)))
+ space.wrap(msg))
skip_re = 0
add_parens = 0
if tp == "\0":
From pypy.commits at gmail.com Mon May 2 00:47:14 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 21:47:14 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: merge py3k
Message-ID: <5726dbd2.161b1c0a.d0e1f.455a@mx.google.com>
Author: Philip Jenvey
Branch: py3.5
Changeset: r84115:ecd0020a0f93
Date: 2016-05-01 21:44 -0700
http://bitbucket.org/pypy/pypy/changeset/ecd0020a0f93/
Log: merge py3k
diff too long, truncating to 2000 out of 20403 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,3 +20,5 @@
5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/TODO b/TODO
new file mode 100644
--- /dev/null
+++ b/TODO
@@ -0,0 +1,2 @@
+* reduce size of generated c code from slot definitions in slotdefs.
+* remove broken DEBUG_REFCOUNT from pyobject.py
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py
--- a/lib-python/3/test/test_itertools.py
+++ b/lib-python/3/test/test_itertools.py
@@ -1281,6 +1281,7 @@
p = weakref.proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
+ support.gc_collect()
self.assertRaises(ReferenceError, getattr, p, '__class__')
ans = list('abc')
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
+ 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after ` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create a integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -320,8 +320,7 @@
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
- def __hash__(self):
- raise TypeError("deque objects are unhashable")
+ __hash__ = None
def __copy__(self):
return self.__class__(self, self.maxlen)
diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py
--- a/lib_pypy/_pypy_wait.py
+++ b/lib_pypy/_pypy_wait.py
@@ -1,51 +1,22 @@
-from resource import _struct_rusage, struct_rusage
-from ctypes import CDLL, c_int, POINTER, byref
-from ctypes.util import find_library
+from resource import ffi, lib, _make_struct_rusage
__all__ = ["wait3", "wait4"]
-libc = CDLL(find_library("c"))
-c_wait3 = libc.wait3
-c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait3.restype = c_int
-
-c_wait4 = libc.wait4
-c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait4.restype = c_int
-
-def create_struct_rusage(c_struct):
- return struct_rusage((
- float(c_struct.ru_utime),
- float(c_struct.ru_stime),
- c_struct.ru_maxrss,
- c_struct.ru_ixrss,
- c_struct.ru_idrss,
- c_struct.ru_isrss,
- c_struct.ru_minflt,
- c_struct.ru_majflt,
- c_struct.ru_nswap,
- c_struct.ru_inblock,
- c_struct.ru_oublock,
- c_struct.ru_msgsnd,
- c_struct.ru_msgrcv,
- c_struct.ru_nsignals,
- c_struct.ru_nvcsw,
- c_struct.ru_nivcsw))
def wait3(options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait3(byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait3(status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
def wait4(pid, options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait4(pid, status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_resource_build.py
@@ -0,0 +1,118 @@
+from cffi import FFI
+
+ffi = FFI()
+
+# Note: we don't directly expose 'struct timeval' or 'struct rlimit'
+
+
+rlimit_consts = '''
+RLIMIT_CPU
+RLIMIT_FSIZE
+RLIMIT_DATA
+RLIMIT_STACK
+RLIMIT_CORE
+RLIMIT_NOFILE
+RLIMIT_OFILE
+RLIMIT_VMEM
+RLIMIT_AS
+RLIMIT_RSS
+RLIMIT_NPROC
+RLIMIT_MEMLOCK
+RLIMIT_SBSIZE
+RLIM_INFINITY
+RUSAGE_SELF
+RUSAGE_CHILDREN
+RUSAGE_BOTH
+'''.split()
+
+rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s)
+ for s in rlimit_consts]
+
+
+ffi.set_source("_resource_cffi", """
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+
+static const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[] = {
+$RLIMIT_CONSTS
+ { NULL, 0 }
+};
+
+#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001)
+
+static double my_utime(struct rusage *input)
+{
+ return doubletime(input->ru_utime);
+}
+
+static double my_stime(struct rusage *input)
+{
+ return doubletime(input->ru_stime);
+}
+
+static int my_getrlimit(int resource, long long result[2])
+{
+ struct rlimit rl;
+ if (getrlimit(resource, &rl) == -1)
+ return -1;
+ result[0] = rl.rlim_cur;
+ result[1] = rl.rlim_max;
+ return 0;
+}
+
+static int my_setrlimit(int resource, long long cur, long long max)
+{
+ struct rlimit rl;
+ rl.rlim_cur = cur & RLIM_INFINITY;
+ rl.rlim_max = max & RLIM_INFINITY;
+ return setrlimit(resource, &rl);
+}
+
+""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts)))
+
+
+ffi.cdef("""
+
+#define RLIM_NLIMITS ...
+
+const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[];
+
+struct rusage {
+ long ru_maxrss;
+ long ru_ixrss;
+ long ru_idrss;
+ long ru_isrss;
+ long ru_minflt;
+ long ru_majflt;
+ long ru_nswap;
+ long ru_inblock;
+ long ru_oublock;
+ long ru_msgsnd;
+ long ru_msgrcv;
+ long ru_nsignals;
+ long ru_nvcsw;
+ long ru_nivcsw;
+ ...;
+};
+
+static double my_utime(struct rusage *);
+static double my_stime(struct rusage *);
+void getrusage(int who, struct rusage *result);
+int my_getrlimit(int resource, long long result[2]);
+int my_setrlimit(int resource, long long cur, long long max);
+
+int wait3(int *status, int options, struct rusage *rusage);
+int wait4(int pid, int *status, int options, struct rusage *rusage);
+""")
+
+
+if __name__ == "__main__":
+ ffi.compile()
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -29,7 +29,8 @@
_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
_r_cdecl = re.compile(r"\b__cdecl\b")
-_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')
+_r_extern_python = re.compile(r'\bextern\s*"'
+ r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
_r_star_const_space = re.compile( # matches "* const "
r"[*]\s*((const|volatile|restrict)\b\s*)+")
@@ -88,6 +89,12 @@
# void __cffi_extern_python_start;
# int foo(int);
# void __cffi_extern_python_stop;
+ #
+ # input: `extern "Python+C" int foo(int);`
+ # output:
+ # void __cffi_extern_python_plus_c_start;
+ # int foo(int);
+ # void __cffi_extern_python_stop;
parts = []
while True:
match = _r_extern_python.search(csource)
@@ -98,7 +105,10 @@
#print ''.join(parts)+csource
#print '=>'
parts.append(csource[:match.start()])
- parts.append('void __cffi_extern_python_start; ')
+ if 'C' in match.group(1):
+ parts.append('void __cffi_extern_python_plus_c_start; ')
+ else:
+ parts.append('void __cffi_extern_python_start; ')
if csource[endpos] == '{':
# grouping variant
closing = csource.find('}', endpos)
@@ -302,7 +312,7 @@
break
#
try:
- self._inside_extern_python = False
+ self._inside_extern_python = '__cffi_extern_python_stop'
for decl in iterator:
if isinstance(decl, pycparser.c_ast.Decl):
self._parse_decl(decl)
@@ -376,8 +386,10 @@
tp = self._get_type_pointer(tp, quals)
if self._options.get('dllexport'):
tag = 'dllexport_python '
- elif self._inside_extern_python:
+ elif self._inside_extern_python == '__cffi_extern_python_start':
tag = 'extern_python '
+ elif self._inside_extern_python == '__cffi_extern_python_plus_c_start':
+ tag = 'extern_python_plus_c '
else:
tag = 'function '
self._declare(tag + decl.name, tp)
@@ -421,11 +433,9 @@
# hack: `extern "Python"` in the C source is replaced
# with "void __cffi_extern_python_start;" and
# "void __cffi_extern_python_stop;"
- self._inside_extern_python = not self._inside_extern_python
- assert self._inside_extern_python == (
- decl.name == '__cffi_extern_python_start')
+ self._inside_extern_python = decl.name
else:
- if self._inside_extern_python:
+ if self._inside_extern_python !='__cffi_extern_python_stop':
raise api.CDefError(
"cannot declare constants or "
"variables with 'extern \"Python\"'")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -1145,11 +1145,11 @@
def _generate_cpy_extern_python_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
self._do_collect_type(tp)
+ _generate_cpy_dllexport_python_collecttype = \
+ _generate_cpy_extern_python_plus_c_collecttype = \
+ _generate_cpy_extern_python_collecttype
- def _generate_cpy_dllexport_python_collecttype(self, tp, name):
- self._generate_cpy_extern_python_collecttype(tp, name)
-
- def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False):
+ def _extern_python_decl(self, tp, name, tag_and_space):
prnt = self._prnt
if isinstance(tp.result, model.VoidType):
size_of_result = '0'
@@ -1184,11 +1184,7 @@
size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
tp.result.get_c_name(''), size_of_a,
tp.result.get_c_name(''), size_of_a)
- if dllexport:
- tag = 'CFFI_DLLEXPORT'
- else:
- tag = 'static'
- prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments)))
+ prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
prnt('{')
prnt(' char a[%s];' % size_of_a)
prnt(' char *p = a;')
@@ -1206,8 +1202,14 @@
prnt()
self._num_externpy += 1
+ def _generate_cpy_extern_python_decl(self, tp, name):
+ self._extern_python_decl(tp, name, 'static ')
+
def _generate_cpy_dllexport_python_decl(self, tp, name):
- self._generate_cpy_extern_python_decl(tp, name, dllexport=True)
+ self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
+
+ def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
+ self._extern_python_decl(tp, name, '')
def _generate_cpy_extern_python_ctx(self, tp, name):
if self.target_is_python:
@@ -1220,8 +1222,9 @@
self._lsts["global"].append(
GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
- def _generate_cpy_dllexport_python_ctx(self, tp, name):
- self._generate_cpy_extern_python_ctx(tp, name)
+ _generate_cpy_dllexport_python_ctx = \
+ _generate_cpy_extern_python_plus_c_ctx = \
+ _generate_cpy_extern_python_ctx
def _string_literal(self, s):
def _char_repr(c):
diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty
new file mode 100644
--- /dev/null
+++ b/lib_pypy/ctypes_config_cache/.empty
@@ -0,0 +1,1 @@
+dummy file to allow old buildbot configuration to run
diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py
deleted file mode 100644
diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/dumpcache.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import sys, os
-from ctypes_configure import dumpcache
-
-def dumpcache2(basename, config):
- size = 32 if sys.maxint <= 2**32 else 64
- filename = '_%s_%s_.py' % (basename, size)
- dumpcache.dumpcache(__file__, filename, config)
- #
- filename = os.path.join(os.path.dirname(__file__),
- '_%s_cache.py' % (basename,))
- g = open(filename, 'w')
- print >> g, '''\
-import sys
-_size = 32 if sys.maxsize <= 2**32 else 64
-# XXX relative import, should be removed together with
-# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib
-_mod = __import__("_%s_%%s_" %% (_size,),
- globals(), locals(), ["*"], level=1)
-globals().update(_mod.__dict__)\
-''' % (basename,)
- g.close()
diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/locale.ctc.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-'ctypes_configure' source for _locale.py.
-Run this to rebuild _locale_cache.py.
-"""
-
-from ctypes_configure.configure import (configure, ExternalCompilationInfo,
- ConstantInteger, DefinedConstantInteger, SimpleType, check_eci)
-import dumpcache
-
-# ____________________________________________________________
-
-_CONSTANTS = [
- 'LC_CTYPE',
- 'LC_TIME',
- 'LC_COLLATE',
- 'LC_MONETARY',
- 'LC_MESSAGES',
- 'LC_NUMERIC',
- 'LC_ALL',
- 'CHAR_MAX',
-]
-
-class LocaleConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['limits.h',
- 'locale.h'])
-for key in _CONSTANTS:
- setattr(LocaleConfigure, key, DefinedConstantInteger(key))
-
-config = configure(LocaleConfigure, noerr=True)
-for key, value in config.items():
- if value is None:
- del config[key]
- _CONSTANTS.remove(key)
-
-# ____________________________________________________________
-
-eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h'])
-HAS_LANGINFO = check_eci(eci)
-
-if HAS_LANGINFO:
- # list of all possible names
- langinfo_names = [
- "RADIXCHAR", "THOUSEP", "CRNCYSTR",
- "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR",
- "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT",
- "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT",
- ]
- for i in range(1, 8):
- langinfo_names.append("DAY_%d" % i)
- langinfo_names.append("ABDAY_%d" % i)
- for i in range(1, 13):
- langinfo_names.append("MON_%d" % i)
- langinfo_names.append("ABMON_%d" % i)
-
- class LanginfoConfigure:
- _compilation_info_ = eci
- nl_item = SimpleType('nl_item')
- for key in langinfo_names:
- setattr(LanginfoConfigure, key, DefinedConstantInteger(key))
-
- langinfo_config = configure(LanginfoConfigure)
- for key, value in langinfo_config.items():
- if value is None:
- del langinfo_config[key]
- langinfo_names.remove(key)
- config.update(langinfo_config)
- _CONSTANTS += langinfo_names
-
-# ____________________________________________________________
-
-config['ALL_CONSTANTS'] = tuple(_CONSTANTS)
-config['HAS_LANGINFO'] = HAS_LANGINFO
-dumpcache.dumpcache2('locale', config)
diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py
deleted file mode 100755
--- a/lib_pypy/ctypes_config_cache/rebuild.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env python
-# Run this script to rebuild all caches from the *.ctc.py files.
-
-import os, sys
-
-sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))
-
-import py
-
-_dirpath = os.path.dirname(__file__) or os.curdir
-
-from rpython.tool.ansi_print import AnsiLogger
-log = AnsiLogger("ctypes_config_cache")
-
-
-def rebuild_one(name):
- filename = os.path.join(_dirpath, name)
- d = {'__file__': filename}
- path = sys.path[:]
- try:
- sys.path.insert(0, _dirpath)
- execfile(filename, d)
- finally:
- sys.path[:] = path
-
-def try_rebuild():
- size = 32 if sys.maxint <= 2**32 else 64
- # remove the files '_*_size_.py'
- left = {}
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_%s_.py' % size) or
- p.endswith('_%s_.pyc' % size)):
- os.unlink(os.path.join(_dirpath, p))
- elif p.startswith('_') and (p.endswith('_.py') or
- p.endswith('_.pyc')):
- for i in range(2, len(p)-4):
- left[p[:i]] = True
- # remove the files '_*_cache.py' if there is no '_*_*_.py' left around
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_cache.py') or
- p.endswith('_cache.pyc')):
- if p[:-9] not in left:
- os.unlink(os.path.join(_dirpath, p))
- #
- for p in os.listdir(_dirpath):
- if p.endswith('.ctc.py'):
- try:
- rebuild_one(p)
- except Exception, e:
- log.ERROR("Running %s:\n %s: %s" % (
- os.path.join(_dirpath, p),
- e.__class__.__name__, e))
-
-
-if __name__ == '__main__':
- try_rebuild()
diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/resource.ctc.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-'ctypes_configure' source for resource.py.
-Run this to rebuild _resource_cache.py.
-"""
-
-
-from ctypes import sizeof
-import dumpcache
-from ctypes_configure.configure import (configure,
- ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger,
- SimpleType)
-
-
-_CONSTANTS = (
- 'RLIM_INFINITY',
- 'RLIM_NLIMITS',
-)
-_OPTIONAL_CONSTANTS = (
- 'RLIMIT_CPU',
- 'RLIMIT_FSIZE',
- 'RLIMIT_DATA',
- 'RLIMIT_STACK',
- 'RLIMIT_CORE',
- 'RLIMIT_RSS',
- 'RLIMIT_NPROC',
- 'RLIMIT_NOFILE',
- 'RLIMIT_OFILE',
- 'RLIMIT_MEMLOCK',
- 'RLIMIT_AS',
- 'RLIMIT_LOCKS',
- 'RLIMIT_SIGPENDING',
- 'RLIMIT_MSGQUEUE',
- 'RLIMIT_NICE',
- 'RLIMIT_RTPRIO',
- 'RLIMIT_VMEM',
-
- 'RUSAGE_BOTH',
- 'RUSAGE_SELF',
- 'RUSAGE_CHILDREN',
-)
-
-# Setup our configure
-class ResourceConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h'])
- rlim_t = SimpleType('rlim_t')
-for key in _CONSTANTS:
- setattr(ResourceConfigure, key, ConstantInteger(key))
-for key in _OPTIONAL_CONSTANTS:
- setattr(ResourceConfigure, key, DefinedConstantInteger(key))
-
-# Configure constants and types
-config = configure(ResourceConfigure)
-config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1
-optional_constants = []
-for key in _OPTIONAL_CONSTANTS:
- if config[key] is not None:
- optional_constants.append(key)
- else:
- del config[key]
-
-config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants)
-dumpcache.dumpcache2('resource', config)
diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py
--- a/lib_pypy/pwd.py
+++ b/lib_pypy/pwd.py
@@ -1,4 +1,4 @@
-# ctypes implementation: Victor Stinner, 2008-05-08
+# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08
"""
This module provides access to the Unix password database.
It is available on all Unix versions.
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -1,15 +1,8 @@
-import sys
-if sys.platform == 'win32':
- raise ImportError('resource module not available for win32')
+"""http://docs.python.org/library/resource"""
-# load the platform-specific cache made by running resource.ctc.py
-from ctypes_config_cache._resource_cache import *
-
-from ctypes_support import standard_c_lib as libc
-from ctypes_support import get_errno
-from ctypes import Structure, c_int, c_long, byref, POINTER
+from _resource_cffi import ffi, lib
from errno import EINVAL, EPERM
-import _structseq
+import _structseq, os
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
@@ -18,104 +11,37 @@
class error(Exception):
pass
+class struct_rusage(metaclass=_structseq.structseqtype):
+ """struct_rusage: Result from getrusage.
-# Read required libc functions
-_getrusage = libc.getrusage
-_getrlimit = libc.getrlimit
-_setrlimit = libc.setrlimit
-try:
- _getpagesize = libc.getpagesize
- _getpagesize.argtypes = ()
- _getpagesize.restype = c_int
-except AttributeError:
- from os import sysconf
- _getpagesize = None
+This object may be accessed either as a tuple of
+ (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt,
+ nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw)
+or via the attributes ru_utime, ru_stime, ru_maxrss, and so on."""
+ __metaclass__ = _structseq.structseqtype
-class timeval(Structure):
- _fields_ = (
- ("tv_sec", c_long),
- ("tv_usec", c_long),
- )
- def __str__(self):
- return "(%s, %s)" % (self.tv_sec, self.tv_usec)
+ ru_utime = _structseq.structseqfield(0, "user time used")
+ ru_stime = _structseq.structseqfield(1, "system time used")
+ ru_maxrss = _structseq.structseqfield(2, "max. resident set size")
+ ru_ixrss = _structseq.structseqfield(3, "shared memory size")
+ ru_idrss = _structseq.structseqfield(4, "unshared data size")
+ ru_isrss = _structseq.structseqfield(5, "unshared stack size")
+ ru_minflt = _structseq.structseqfield(6, "page faults not requiring I/O")
+ ru_majflt = _structseq.structseqfield(7, "page faults requiring I/O")
+ ru_nswap = _structseq.structseqfield(8, "number of swap outs")
+ ru_inblock = _structseq.structseqfield(9, "block input operations")
+ ru_oublock = _structseq.structseqfield(10, "block output operations")
+ ru_msgsnd = _structseq.structseqfield(11, "IPC messages sent")
+ ru_msgrcv = _structseq.structseqfield(12, "IPC messages received")
+ ru_nsignals = _structseq.structseqfield(13,"signals received")
+ ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches")
+ ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches")
- def __float__(self):
- return self.tv_sec + self.tv_usec/1000000.0
-
-class _struct_rusage(Structure):
- _fields_ = (
- ("ru_utime", timeval),
- ("ru_stime", timeval),
- ("ru_maxrss", c_long),
- ("ru_ixrss", c_long),
- ("ru_idrss", c_long),
- ("ru_isrss", c_long),
- ("ru_minflt", c_long),
- ("ru_majflt", c_long),
- ("ru_nswap", c_long),
- ("ru_inblock", c_long),
- ("ru_oublock", c_long),
- ("ru_msgsnd", c_long),
- ("ru_msgrcv", c_long),
- ("ru_nsignals", c_long),
- ("ru_nvcsw", c_long),
- ("ru_nivcsw", c_long),
- )
-
-_getrusage.argtypes = (c_int, POINTER(_struct_rusage))
-_getrusage.restype = c_int
-
-
-class struct_rusage(metaclass=_structseq.structseqtype):
- ru_utime = _structseq.structseqfield(0)
- ru_stime = _structseq.structseqfield(1)
- ru_maxrss = _structseq.structseqfield(2)
- ru_ixrss = _structseq.structseqfield(3)
- ru_idrss = _structseq.structseqfield(4)
- ru_isrss = _structseq.structseqfield(5)
- ru_minflt = _structseq.structseqfield(6)
- ru_majflt = _structseq.structseqfield(7)
- ru_nswap = _structseq.structseqfield(8)
- ru_inblock = _structseq.structseqfield(9)
- ru_oublock = _structseq.structseqfield(10)
- ru_msgsnd = _structseq.structseqfield(11)
- ru_msgrcv = _structseq.structseqfield(12)
- ru_nsignals = _structseq.structseqfield(13)
- ru_nvcsw = _structseq.structseqfield(14)
- ru_nivcsw = _structseq.structseqfield(15)
-
-@builtinify
-def rlimit_check_bounds(rlim_cur, rlim_max):
- if rlim_cur > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_cur)
- if rlim_max > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_max)
-
-class rlimit(Structure):
- _fields_ = (
- ("rlim_cur", rlim_t),
- ("rlim_max", rlim_t),
- )
-
-_getrlimit.argtypes = (c_int, POINTER(rlimit))
-_getrlimit.restype = c_int
-_setrlimit.argtypes = (c_int, POINTER(rlimit))
-_setrlimit.restype = c_int
-
-
-@builtinify
-def getrusage(who):
- ru = _struct_rusage()
- ret = _getrusage(who, byref(ru))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- raise ValueError("invalid who parameter")
- raise error(errno)
+def _make_struct_rusage(ru):
return struct_rusage((
- float(ru.ru_utime),
- float(ru.ru_stime),
+ lib.my_utime(ru),
+ lib.my_stime(ru),
ru.ru_maxrss,
ru.ru_ixrss,
ru.ru_idrss,
@@ -133,48 +59,59 @@
))
@builtinify
+def getrusage(who):
+ ru = ffi.new("struct rusage *")
+ if lib.getrusage(who, ru) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("invalid who parameter")
+ raise error(ffi.errno)
+ return _make_struct_rusage(ru)
+
+@builtinify
def getrlimit(resource):
- if not(0 <= resource < RLIM_NLIMITS):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlim = rlimit()
- ret = _getrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- raise error(errno)
- return (rlim.rlim_cur, rlim.rlim_max)
+ result = ffi.new("long long[2]")
+ if lib.my_getrlimit(resource, result) == -1:
+ raise error(ffi.errno)
+ return (result[0], result[1])
@builtinify
-def setrlimit(resource, rlim):
- if not(0 <= resource < RLIM_NLIMITS):
+def setrlimit(resource, limits):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlimit_check_bounds(*rlim)
- rlim = rlimit(rlim[0], rlim[1])
- ret = _setrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- return ValueError("current limit exceeds maximum limit")
- elif errno == EPERM:
- return ValueError("not allowed to raise maximum limit")
+ limits = tuple(limits)
+ if len(limits) != 2:
+ raise ValueError("expected a tuple of 2 integers")
+
+ if lib.my_setrlimit(resource, limits[0], limits[1]) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("current limit exceeds maximum limit")
+ elif ffi.errno == EPERM:
+ raise ValueError("not allowed to raise maximum limit")
else:
- raise error(errno)
+ raise error(ffi.errno)
+
@builtinify
def getpagesize():
- if _getpagesize:
- return _getpagesize()
- else:
- try:
- return sysconf("SC_PAGE_SIZE")
- except ValueError:
- # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE
- return sysconf("SC_PAGESIZE")
+ return os.sysconf("SC_PAGESIZE")
-__all__ = ALL_CONSTANTS + (
- 'error', 'timeval', 'struct_rusage', 'rlimit',
- 'getrusage', 'getrlimit', 'setrlimit', 'getpagesize',
+
+def _setup():
+ all_constants = []
+ p = lib.my_rlimit_consts
+ while p.name:
+ name = ffi.string(p.name)
+ globals()[name] = int(p.value)
+ all_constants.append(name)
+ p += 1
+ return all_constants
+
+__all__ = tuple(_setup()) + (
+ 'error', 'getpagesize', 'struct_rusage',
+ 'getrusage', 'getrlimit', 'setrlimit',
)
-
-del ALL_CONSTANTS
+del _setup
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -205,15 +205,6 @@
BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
default=False),
- BoolOption("withprebuiltchar",
- "use prebuilt single-character string objects",
- default=False),
-
- BoolOption("sharesmallstr",
- "always reuse the prebuilt string objects "
- "(the empty string and potentially single-char strings)",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
@@ -223,34 +214,14 @@
default=False,
requires=[("objspace.honor__builtins__", False)]),
- BoolOption("withmapdict",
- "make instances really small but slow without the JIT",
- default=False,
- requires=[("objspace.std.getattributeshortcut", True),
- ("objspace.std.withtypeversion", True),
- ]),
-
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
- BoolOption("withtypeversion",
- "version type objects when changing them",
- cmdline=None,
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
-
- BoolOption("withmethodcache",
- "try to cache method lookups",
- default=False,
- requires=[("objspace.std.withtypeversion", True),
- ("translation.rweakref", True)]),
BoolOption("withmethodcachecounter",
"try to cache methods and provide a counter in __pypy__. "
"for testing purposes only.",
- default=False,
- requires=[("objspace.std.withmethodcache", True)]),
+ default=False),
IntOption("methodcachesizeexp",
" 2 ** methodcachesizeexp is the size of the of the method cache ",
default=11),
@@ -261,22 +232,10 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("getattributeshortcut",
- "track types that override __getattribute__",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
BoolOption("newshortcut",
"cache and shortcut calling __new__ from builtin types",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
+ default=False),
- BoolOption("withidentitydict",
- "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
]),
])
@@ -292,14 +251,10 @@
"""
# all the good optimizations for PyPy should be listed here
if level in ['2', '3', 'jit']:
- config.objspace.std.suggest(withmethodcache=True)
- config.objspace.std.suggest(withprebuiltchar=True)
config.objspace.std.suggest(intshortcut=True)
config.objspace.std.suggest(optimized_list_getitem=True)
- config.objspace.std.suggest(getattributeshortcut=True)
#config.objspace.std.suggest(newshortcut=True)
config.objspace.std.suggest(withspecialisedtuple=True)
- config.objspace.std.suggest(withidentitydict=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
@@ -312,15 +267,13 @@
# memory-saving optimizations
if level == 'mem':
config.objspace.std.suggest(withprebuiltint=True)
- config.objspace.std.suggest(withprebuiltchar=True)
- config.objspace.std.suggest(withmapdict=True)
+ config.objspace.std.suggest(withliststrategies=True)
if not IS_64_BITS:
config.objspace.std.suggest(withsmalllong=True)
# extra optimizations with the JIT
if level == 'jit':
config.objspace.std.suggest(withcelldict=True)
- config.objspace.std.suggest(withmapdict=True)
def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
assert conf.objspace.usemodules.gc
- conf.objspace.std.withmapdict = True
- assert conf.objspace.std.withtypeversion
- conf = get_pypy_config()
- conf.objspace.std.withtypeversion = False
- py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
def test_conflicting_gcrootfinder():
conf = get_pypy_config()
conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
def test_set_pypy_opt_level():
conf = get_pypy_config()
set_pypy_opt_level(conf, '2')
- assert conf.objspace.std.getattributeshortcut
+ assert conf.objspace.std.intshortcut
conf = get_pypy_config()
set_pypy_opt_level(conf, '0')
- assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
- conf = get_pypy_config()
- conf.translation.rweakref = False
- set_pypy_opt_level(conf, '3')
-
- assert not conf.objspace.std.withtypeversion
- assert not conf.objspace.std.withmethodcache
+ assert not conf.objspace.std.intshortcut
def test_check_documentation():
def check_file_exists(fn):
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -102,15 +102,15 @@
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
- tk-dev
+ tk-dev libgc-dev
For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
On Fedora::
- yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
- lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
- (XXX plus the Febora version of libgdbm-dev and tk-dev)
+ dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+ gdbm-devel
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
The work on the cling backend has so far been done only for CPython, but
bringing it to PyPy is a lot less work than developing it in the first place.
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
.. _llvm: http://llvm.org/
.. _clang: http://clang.llvm.org/
@@ -283,7 +283,8 @@
core reflection set, but for the moment assume we want to have it in the
reflection library that we are building for this example.
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries")
which is a simple XML file specifying, either explicitly or by using a
pattern, which classes, variables, namespaces, etc. to select from the given
header file.
@@ -305,7 +306,7 @@
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
Now the reflection info can be generated and compiled::
@@ -811,7 +812,7 @@
immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
variable.
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
There are a couple of minor differences between PyCintex and cppyy, most to do
with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts `
+:source:`pypy/goal/` our main PyPy-translation scripts
live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
discussion/improve-rpython
discussion/ctypes-implementation
discussion/jit-profiler
+ discussion/rawrefcount
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -79,7 +79,7 @@
:doc:`Full details ` are `available here `.
.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
RPython Mixed Modules
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,20 +106,33 @@
For information on which third party extensions work (or do not work)
with PyPy see the `compatibility wiki`_.
+For more information about how we manage refcounting semantics see
+rawrefcount_
+
.. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
.. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html
On which platforms does PyPy run?
---------------------------------
-PyPy is regularly and extensively tested on Linux machines. It mostly
+PyPy currently supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+PyPy is regularly and extensively tested on Linux machines. It
works on Mac and Windows: it is tested there, but most of us are running
-Linux so fixes may depend on 3rd-party contributions. PyPy's JIT
-works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7).
-Support for POWER (64-bit) is stalled at the moment.
+Linux so fixes may depend on 3rd-party contributions.
-To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or
+To bootstrap from sources, PyPy can use either CPython 2.7 or
another (e.g. older) PyPy. Cross-translation is not really supported:
e.g. to build a 32-bit PyPy, you need to have a 32-bit environment.
Cross-translation is only explicitly supported between a 32-bit Intel
diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
Dictionary Optimizations
~~~~~~~~~~~~~~~~~~~~~~~~
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
-Multi-dicts are a special implementation of dictionaries. It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime. Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that make it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled, by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
-This is now the default implementation of dictionaries in the Python interpreter.
+Identity Dicts
++++++++++++++++
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with
+new-style classes.
+
+
+Map Dicts
+++++++++++++
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually memory behaviour
+should be mostly like that of using ``__slots__``).
The idea is the following: Most instances of the same class have very similar
attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
dicts:
the representation of the instance dict contains only a list of values.
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
User Class Optimizations
@@ -114,8 +120,7 @@
base classes is changed). On subsequent lookups the cached version can be used,
as long as the instance did not shadow any of its classes attributes.
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
Interpreter Optimizations
diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst
--- a/pypy/doc/introduction.rst
+++ b/pypy/doc/introduction.rst
@@ -1,16 +1,22 @@
What is PyPy?
=============
-In common parlance, PyPy has been used to mean two things. The first is the
-:ref:`RPython translation toolchain `, which is a framework for generating
-dynamic programming language implementations. And the second is one
-particular implementation that is so generated --
-an implementation of the Python_ programming language written in
-Python itself. It is designed to be flexible and easy to experiment with.
+Historically, PyPy has been used to mean two things. The first is the
+:ref:`RPython translation toolchain ` for generating
+interpreters for dynamic programming languages. And the second is one
+particular implementation of Python_ produced with it. Because RPython
+uses the same syntax as Python, this generated version became known as
+a Python interpreter written in Python. It is designed to be flexible and
+easy to experiment with.
-This double usage has proven to be confusing, and we are trying to move
-away from using the word PyPy to mean both things. From now on we will
-try to use PyPy to only mean the Python implementation, and say the
+To make it more clear, we start with source code written in RPython,
+apply the RPython translation toolchain, and end up with PyPy as a
+binary executable. This executable is the Python interpreter.
+
+Double usage has proven to be confusing, so we've moved away from using
+the word PyPy to mean both toolchain and generated interpreter. Now we
+use the word PyPy to refer to the Python implementation, and explicitly
+mention
:ref:`RPython translation toolchain ` when we mean the framework.
Some older documents, presentations, papers and videos will still have the old
diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst
--- a/pypy/doc/release-5.1.0.rst
+++ b/pypy/doc/release-5.1.0.rst
@@ -3,10 +3,17 @@
========
We have released PyPy 5.1, about a month after PyPy 5.0.
-We encourage all users of PyPy to update to this version. Apart from the usual
-bug fixes, there is an ongoing effort to improve the warmup time and memory
-usage of JIT-related metadata, and we now fully support the IBM s390x
-architecture.
+
+This release includes more improvement to warmup time and memory
+requirements. We have seen about a 20% memory requirement reduction and up to
+30% warmup time improvement, more detail in the `blog post`_.
+
+We also now have `fully support for the IBM s390x`_. Since this support is in
+`RPython`_, any dynamic language written using RPython, like PyPy, will
+automagically be supported on that architecture.
+
+We updated cffi_ to 1.6, and continue to improve support for the wider
+python ecosystem using the PyPy interpreter.
You can download the PyPy 5.1 release here:
@@ -26,6 +33,9 @@
.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
.. _`numpy`: https://bitbucket.org/pypy/numpy
+.. _cffi: https://cffi.readthedocs.org
+.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html
+.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html
What is PyPy?
=============
@@ -46,7 +56,7 @@
* big- and little-endian variants of **PPC64** running Linux,
- * **s960x** running Linux
+ * **s390x** running Linux
.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
.. _`dynamic languages`: http://pypyjs.org
@@ -74,6 +84,8 @@
* Fix a corner case in the JIT
* Fix edge cases in the cpyext refcounting-compatible semantics
+ (more work on cpyext compatibility is coming in the ``cpyext-ext``
+ branch, but isn't ready yet)
* Try harder to not emit NEON instructions on ARM processors without NEON
support
@@ -92,11 +104,17 @@
* Fix sandbox startup (a regression in 5.0)
+ * Fix possible segfault for classes with mangled mro or __metaclass__
+
+ * Fix isinstance(deque(), Hashable) on the pure python deque
+
+ * Fix an issue with forkpty()
+
* Issues reported with our previous release were resolved_ after reports from users on
our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
#pypy
-* Numpy:
+* Numpy_:
* Implemented numpy.where for a single argument
@@ -108,6 +126,8 @@
functions exported from libpypy.so are declared in pypy_numpy.h, which is
included only when building our fork of numpy
+ * Add broadcast
+
* Performance improvements:
* Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting
@@ -119,14 +139,18 @@
* Remove the forced minor collection that occurs when rewriting the
assembler at the start of the JIT backend
+ * Port the resource module to cffi
+
* Internal refactorings:
* Use a simpler logger to speed up translation
* Drop vestiges of Python 2.5 support in testing
+ * Update rpython functions with ones needed for py3k
+
.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html
-.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html
+.. _Numpy: https://bitbucket.org/pypy/numpy
Please update, and continue to help us make PyPy better.
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released a bugfix for PyPy 5.1, due to a regression_ in
+installing third-party packages dependent on numpy (using our numpy fork
+available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy which increased the memory required to translate. Improvement
+will be noticed by downstream packagers and those who translate rather than
+download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst
--- a/pypy/doc/whatsnew-5.1.0.rst
+++ b/pypy/doc/whatsnew-5.1.0.rst
@@ -60,3 +60,13 @@
Remove old uneeded numpy headers, what is left is only for testing. Also
generate pypy_numpy.h which exposes functions to directly use micronumpy
ndarray and ufuncs
+
+.. branch: rposix-for-3
+
+Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
+This updates the underlying rpython functions with the ones needed for the
+py3k branch
+
+.. branch: numpy_broadcast
+
+Add broadcast to micronumpy
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,14 +3,61 @@
=========================
.. this is a revision shortly after release-5.1
-.. startrev: 2180e1eaf6f6
+.. startrev: aa60332382a1
-.. branch: rposix-for-3
+.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046
-Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
-This updates the underlying rpython functions with the ones needed for the
-py3k branch
-
-.. branch: numpy_broadcast
+.. branch: gcheader-decl
-Add broadcast to micronumpy
+Reduce the size of generated C sources.
+
+
+.. branch: remove-objspace-options
+
+Remove a number of options from the build process that were never tested and
+never set. Fix a performance bug in the method cache.
+
+.. branch: bitstring
+
+JIT: use bitstrings to compress the lists of read or written descrs
+that we attach to EffectInfo. Fixes a problem we had in
+remove-objspace-options.
+
+.. branch: cpyext-for-merge
+
+Update cpyext C-API support After this branch, we are almost able to support
+upstream numpy via cpyext, so we created (yet another) fork of numpy at
+github.com/pypy/numpy with the needed changes. Among the significant changes
+to cpyext:
+ - allow c-snippet tests to be run with -A so we can verify we are compatible
+ - fix many edge cases exposed by fixing tests to run with -A
+ - issequence() logic matches cpython
+ - make PyStringObject and PyUnicodeObject field names compatible with cpython
+ - add preliminary support for PyDateTime_*
+ - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy,
+ PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile,
+ - PyAnySet_CheckExact, PyUnicode_Concat
+ - improve support for PyGILState_Ensure, PyGILState_Release, and thread
+ primitives, also find a case where CPython will allow thread creation
+   before PyEval_InitThreads is run, disallow on PyPy
+ - create a PyObject-specific list strategy
+ - rewrite slot assignment for typeobjects
+ - improve tracking of PyObject to rpython object mapping
+ - support tp_as_{number, sequence, mapping, buffer} slots
+
+(makes the pypy-c bigger; this was fixed subsequently by the
+share-cpyext-cpython-api branch)
+
+.. branch: share-mapdict-methods-2
+
+Reduce generated code for subclasses by using the same function objects in all
+generated subclasses.
+
+.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL. Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -344,10 +344,6 @@
return PyPyJitPolicy(pypy_hooks)
def get_entry_point(self, config):
- from pypy.tool.lib_pypy import import_from_lib_pypy
- rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild')
- rebuild.try_rebuild()
-
space = make_objspace(config)
# manually imports app_main.py
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -87,7 +87,11 @@
"""
try:
# run it
- f(*fargs, **fkwds)
+ try:
+ f(*fargs, **fkwds)
+ finally:
+ sys.settrace(None)
+ sys.setprofile(None)
except SystemExit as e:
handle_sys_exit(e)
except BaseException as e:
@@ -511,6 +515,7 @@
def exec_(src, dic):
exec(src, dic)
+ at hidden_applevel
def run_command_line(interactive,
inspect,
run_command,
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -53,24 +53,24 @@
n = self.root_node
if n.type == syms.file_input:
stmts = []
- for i in range(len(n.children) - 1):
- stmt = n.children[i]
+ for i in range(n.num_children() - 1):
+ stmt = n.get_child(i)
if stmt.type == tokens.NEWLINE:
continue
sub_stmts_count = self.number_of_statements(stmt)
if sub_stmts_count == 1:
stmts.append(self.handle_stmt(stmt))
else:
- stmt = stmt.children[0]
+ stmt = stmt.get_child(0)
for j in range(sub_stmts_count):
- small_stmt = stmt.children[j * 2]
+ small_stmt = stmt.get_child(j * 2)
stmts.append(self.handle_stmt(small_stmt))
return ast.Module(stmts)
elif n.type == syms.eval_input:
- body = self.handle_testlist(n.children[0])
+ body = self.handle_testlist(n.get_child(0))
return ast.Expression(body)
elif n.type == syms.single_input:
- first_child = n.children[0]
+ first_child = n.get_child(0)
if first_child.type == tokens.NEWLINE:
# An empty line.
return ast.Interactive([])
@@ -80,8 +80,8 @@
stmts = [self.handle_stmt(first_child)]
else:
stmts = []
- for i in range(0, len(first_child.children), 2):
- stmt = first_child.children[i]
+ for i in range(0, first_child.num_children(), 2):
+ stmt = first_child.get_child(i)
if stmt.type == tokens.NEWLINE:
break
stmts.append(self.handle_stmt(stmt))
@@ -95,16 +95,16 @@
if stmt_type == syms.compound_stmt:
return 1
elif stmt_type == syms.stmt:
- return self.number_of_statements(n.children[0])
+ return self.number_of_statements(n.get_child(0))
elif stmt_type == syms.simple_stmt:
# Divide to remove semi-colons.
- return len(n.children) // 2
+ return n.num_children() // 2
else:
raise AssertionError("non-statement node")
def error(self, msg, n):
"""Raise a SyntaxError with the lineno and column set to n's."""
- raise SyntaxError(msg, n.lineno, n.column,
+ raise SyntaxError(msg, n.get_lineno(), n.get_column(),
filename=self.compile_info.filename)
def error_ast(self, msg, ast_node):
@@ -130,34 +130,34 @@
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_del_stmt(self, del_node):
- targets = self.handle_exprlist(del_node.children[1], ast.Del)
- return ast.Delete(targets, del_node.lineno, del_node.column)
+ targets = self.handle_exprlist(del_node.get_child(1), ast.Del)
+ return ast.Delete(targets, del_node.get_lineno(), del_node.get_column())
def handle_flow_stmt(self, flow_node):
- first_child = flow_node.children[0]
+ first_child = flow_node.get_child(0)
first_child_type = first_child.type
if first_child_type == syms.break_stmt:
- return ast.Break(flow_node.lineno, flow_node.column)
+ return ast.Break(flow_node.get_lineno(), flow_node.get_column())
elif first_child_type == syms.continue_stmt:
- return ast.Continue(flow_node.lineno, flow_node.column)
+ return ast.Continue(flow_node.get_lineno(), flow_node.get_column())
elif first_child_type == syms.yield_stmt:
- yield_expr = self.handle_expr(first_child.children[0])
- return ast.Expr(yield_expr, flow_node.lineno, flow_node.column)
+ yield_expr = self.handle_expr(first_child.get_child(0))
+ return ast.Expr(yield_expr, flow_node.get_lineno(), flow_node.get_column())
elif first_child_type == syms.return_stmt:
- if len(first_child.children) == 1:
+ if first_child.num_children() == 1:
values = None
else:
- values = self.handle_testlist(first_child.children[1])
- return ast.Return(values, flow_node.lineno, flow_node.column)
+ values = self.handle_testlist(first_child.get_child(1))
+ return ast.Return(values, flow_node.get_lineno(), flow_node.get_column())
elif first_child_type == syms.raise_stmt:
exc = None
cause = None
- child_count = len(first_child.children)
+ child_count = first_child.num_children()
if child_count >= 2:
- exc = self.handle_expr(first_child.children[1])
+ exc = self.handle_expr(first_child.get_child(1))
if child_count >= 4:
- cause = self.handle_expr(first_child.children[3])
- return ast.Raise(exc, cause, flow_node.lineno, flow_node.column)
+ cause = self.handle_expr(first_child.get_child(3))
+ return ast.Raise(exc, cause, flow_node.get_lineno(), flow_node.get_column())
else:
raise AssertionError("unknown flow statement")
@@ -165,33 +165,33 @@
while True:
import_name_type = import_name.type
if import_name_type == syms.import_as_name:
- name = self.new_identifier(import_name.children[0].value)
- if len(import_name.children) == 3:
+ name = self.new_identifier(import_name.get_child(0).get_value())
+ if import_name.num_children() == 3:
as_name = self.new_identifier(
- import_name.children[2].value)
- self.check_forbidden_name(as_name, import_name.children[2])
+ import_name.get_child(2).get_value())
+ self.check_forbidden_name(as_name, import_name.get_child(2))
else:
as_name = None
- self.check_forbidden_name(name, import_name.children[0])
+ self.check_forbidden_name(name, import_name.get_child(0))
return ast.alias(name, as_name)
elif import_name_type == syms.dotted_as_name:
- if len(import_name.children) == 1:
- import_name = import_name.children[0]
+ if import_name.num_children() == 1:
+ import_name = import_name.get_child(0)
continue
- alias = self.alias_for_import_name(import_name.children[0],
+ alias = self.alias_for_import_name(import_name.get_child(0),
store=False)
- asname_node = import_name.children[2]
- alias.asname = self.new_identifier(asname_node.value)
+ asname_node = import_name.get_child(2)
+ alias.asname = self.new_identifier(asname_node.get_value())
self.check_forbidden_name(alias.asname, asname_node)
return alias
elif import_name_type == syms.dotted_name:
- if len(import_name.children) == 1:
- name = self.new_identifier(import_name.children[0].value)
+ if import_name.num_children() == 1:
+ name = self.new_identifier(import_name.get_child(0).get_value())
if store:
- self.check_forbidden_name(name, import_name.children[0])
+ self.check_forbidden_name(name, import_name.get_child(0))
return ast.alias(name, None)
- name_parts = [import_name.children[i].value
- for i in range(0, len(import_name.children), 2)]
+ name_parts = [import_name.get_child(i).get_value()
+ for i in range(0, import_name.num_children(), 2)]
name = ".".join(name_parts)
return ast.alias(name, None)
elif import_name_type == tokens.STAR:
@@ -200,20 +200,20 @@
raise AssertionError("unknown import name")
def handle_import_stmt(self, import_node):
- import_node = import_node.children[0]
+ import_node = import_node.get_child(0)
if import_node.type == syms.import_name:
- dotted_as_names = import_node.children[1]
- aliases = [self.alias_for_import_name(dotted_as_names.children[i])
- for i in range(0, len(dotted_as_names.children), 2)]
- return ast.Import(aliases, import_node.lineno, import_node.column)
+ dotted_as_names = import_node.get_child(1)
+ aliases = [self.alias_for_import_name(dotted_as_names.get_child(i))
+ for i in range(0, dotted_as_names.num_children(), 2)]
+ return ast.Import(aliases, import_node.get_lineno(), import_node.get_column())
elif import_node.type == syms.import_from:
- child_count = len(import_node.children)
+ child_count = import_node.num_children()
module = None
modname = None
i = 1
dot_count = 0
while i < child_count:
- child = import_node.children[i]
+ child = import_node.get_child(i)
child_type = child.type
if child_type == syms.dotted_name:
module = self.alias_for_import_name(child, False)
@@ -227,16 +227,16 @@
i += 1
dot_count += 1
i += 1
- after_import_type = import_node.children[i].type
+ after_import_type = import_node.get_child(i).type
star_import = False
if after_import_type == tokens.STAR:
- names_node = import_node.children[i]
+ names_node = import_node.get_child(i)
star_import = True
elif after_import_type == tokens.LPAR:
- names_node = import_node.children[i + 1]
+ names_node = import_node.get_child(i + 1)
elif after_import_type == syms.import_as_names:
- names_node = import_node.children[i]
- if len(names_node.children) % 2 == 0:
+ names_node = import_node.get_child(i)
+ if names_node.num_children() % 2 == 0:
self.error("trailing comma is only allowed with "
"surronding parenthesis", names_node)
else:
@@ -244,307 +244,308 @@
if star_import:
aliases = [self.alias_for_import_name(names_node)]
else:
- aliases = [self.alias_for_import_name(names_node.children[i])
- for i in range(0, len(names_node.children), 2)]
+ aliases = [self.alias_for_import_name(names_node.get_child(i))
+ for i in range(0, names_node.num_children(), 2)]
if module is not None:
modname = module.name
return ast.ImportFrom(modname, aliases, dot_count,
- import_node.lineno, import_node.column)
+ import_node.get_lineno(), import_node.get_column())
else:
raise AssertionError("unknown import node")
def handle_global_stmt(self, global_node):
- names = [self.new_identifier(global_node.children[i].value)
- for i in range(1, len(global_node.children), 2)]
- return ast.Global(names, global_node.lineno, global_node.column)
+ names = [self.new_identifier(global_node.get_child(i).get_value())
+ for i in range(1, global_node.num_children(), 2)]
+ return ast.Global(names, global_node.get_lineno(), global_node.get_column())
def handle_nonlocal_stmt(self, nonlocal_node):
- names = [self.new_identifier(nonlocal_node.children[i].value)
- for i in range(1, len(nonlocal_node.children), 2)]
- return ast.Nonlocal(names, nonlocal_node.lineno, nonlocal_node.column)
+ names = [self.new_identifier(nonlocal_node.get_child(i).get_value())
+ for i in range(1, nonlocal_node.num_children(), 2)]
+ return ast.Nonlocal(names, nonlocal_node.get_lineno(), nonlocal_node.get_column())
def handle_assert_stmt(self, assert_node):
- expr = self.handle_expr(assert_node.children[1])
+ expr = self.handle_expr(assert_node.get_child(1))
msg = None
- if len(assert_node.children) == 4:
- msg = self.handle_expr(assert_node.children[3])
- return ast.Assert(expr, msg, assert_node.lineno, assert_node.column)
+ if assert_node.num_children() == 4:
+ msg = self.handle_expr(assert_node.get_child(3))
+ return ast.Assert(expr, msg, assert_node.get_lineno(), assert_node.get_column())
def handle_suite(self, suite_node):
- first_child = suite_node.children[0]
+ first_child = suite_node.get_child(0)
if first_child.type == syms.simple_stmt:
- end = len(first_child.children) - 1
- if first_child.children[end - 1].type == tokens.SEMI:
+ end = first_child.num_children() - 1
+ if first_child.get_child(end - 1).type == tokens.SEMI:
end -= 1
- stmts = [self.handle_stmt(first_child.children[i])
+ stmts = [self.handle_stmt(first_child.get_child(i))
for i in range(0, end, 2)]
else:
stmts = []
- for i in range(2, len(suite_node.children) - 1):
- stmt = suite_node.children[i]
+ for i in range(2, suite_node.num_children() - 1):
+ stmt = suite_node.get_child(i)
stmt_count = self.number_of_statements(stmt)
if stmt_count == 1:
stmts.append(self.handle_stmt(stmt))
else:
- simple_stmt = stmt.children[0]
- for j in range(0, len(simple_stmt.children), 2):
- stmt = simple_stmt.children[j]
- if not stmt.children:
+ simple_stmt = stmt.get_child(0)
+ for j in range(0, simple_stmt.num_children(), 2):
+ stmt = simple_stmt.get_child(j)
+ if not stmt.num_children():
break
stmts.append(self.handle_stmt(stmt))
return stmts
def handle_if_stmt(self, if_node):
- child_count = len(if_node.children)
+ child_count = if_node.num_children()
if child_count == 4:
- test = self.handle_expr(if_node.children[1])
- suite = self.handle_suite(if_node.children[3])
- return ast.If(test, suite, None, if_node.lineno, if_node.column)
- otherwise_string = if_node.children[4].value
+ test = self.handle_expr(if_node.get_child(1))
+ suite = self.handle_suite(if_node.get_child(3))
+ return ast.If(test, suite, None, if_node.get_lineno(), if_node.get_column())
+ otherwise_string = if_node.get_child(4).get_value()
if otherwise_string == "else":
- test = self.handle_expr(if_node.children[1])
- suite = self.handle_suite(if_node.children[3])
- else_suite = self.handle_suite(if_node.children[6])
- return ast.If(test, suite, else_suite, if_node.lineno,
- if_node.column)
+ test = self.handle_expr(if_node.get_child(1))
+ suite = self.handle_suite(if_node.get_child(3))
+ else_suite = self.handle_suite(if_node.get_child(6))
+ return ast.If(test, suite, else_suite, if_node.get_lineno(),
+ if_node.get_column())
elif otherwise_string == "elif":
elif_count = child_count - 4
- after_elif = if_node.children[elif_count + 1]
+ after_elif = if_node.get_child(elif_count + 1)
if after_elif.type == tokens.NAME and \
- after_elif.value == "else":
+ after_elif.get_value() == "else":
From pypy.commits at gmail.com Mon May 2 01:12:43 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 22:12:43 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: OperationError apocalypse
Message-ID: <5726e1cb.878d1c0a.ed012.45ed@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84116:31ebe44e9a13
Date: 2016-05-01 22:08 -0700
http://bitbucket.org/pypy/pypy/changeset/31ebe44e9a13/
Log: OperationError apocalypse
From pypy.commits at gmail.com Mon May 2 01:12:45 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 22:12:45 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: oefmt pypy/interpreter/
Message-ID: <5726e1cd.c30a1c0a.18ac.469d@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84117:77443b718701
Date: 2016-05-01 22:08 -0700
http://bitbucket.org/pypy/pypy/changeset/77443b718701/
Log: oefmt pypy/interpreter/
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -354,9 +354,7 @@
key = space.str_w(w_key)
except OperationError, e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("keywords must be strings"))
+ raise oefmt(space.w_TypeError, "keywords must be strings")
if e.match(space, space.w_UnicodeEncodeError):
# Allow this to pass through
key = None
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -16,8 +16,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -399,8 +399,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -67,8 +67,8 @@
return space.gettypeobject(self.typedef)
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("__class__ assignment: only for heap types"))
+ raise oefmt(space.w_TypeError,
+ "__class__ assignment: only for heap types")
def user_setup(self, space, w_subtype):
raise NotImplementedError("only for interp-level user subclasses "
@@ -706,8 +706,7 @@
try:
return rthread.allocate_lock()
except rthread.error:
- raise OperationError(self.w_RuntimeError,
- self.wrap("out of resources"))
+ raise oefmt(self.w_RuntimeError, "out of resources")
# Following is a friendly interface to common object space operations
# that can be defined in term of more primitive ones. Subclasses
@@ -901,8 +900,7 @@
raise
break # done
if idx == expected_length:
- raise OperationError(self.w_ValueError,
- self.wrap("too many values to unpack"))
+ raise oefmt(self.w_ValueError, "too many values to unpack")
items[idx] = w_item
idx += 1
if idx < expected_length:
@@ -962,8 +960,8 @@
hint = self.int_w(w_hint)
if hint < 0:
- raise OperationError(self.w_ValueError, self.wrap(
- "__length_hint__() should return >= 0"))
+ raise oefmt(self.w_ValueError,
+ "__length_hint__() should return >= 0")
return hint
def fixedview(self, w_iterable, expected_length=-1):
@@ -1330,8 +1328,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
return start, stop, step
@@ -1351,8 +1348,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
length = 1
@@ -1396,20 +1392,17 @@
try:
return bigint.tolonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
def r_ulonglong_w(self, w_obj, allow_conversion=True):
bigint = self.bigint_w(w_obj, allow_conversion)
try:
return bigint.toulonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
except ValueError:
- raise OperationError(self.w_ValueError,
- self.wrap('cannot convert negative integer '
- 'to unsigned int'))
+ raise oefmt(self.w_ValueError,
+ "cannot convert negative integer to unsigned int")
BUF_SIMPLE = 0x0000
BUF_WRITABLE = 0x0001
@@ -1555,8 +1548,8 @@
from rpython.rlib import rstring
result = w_obj.str_w(self)
if '\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a string without NUL characters")
return rstring.assert_str0(result)
def int_w(self, w_obj, allow_conversion=True):
@@ -1596,8 +1589,7 @@
def realstr_w(self, w_obj):
# Like str_w, but only works if w_obj is really of type 'str'.
if not self.isinstance_w(w_obj, self.w_str):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a string'))
+ raise oefmt(self.w_TypeError, "argument must be a string")
return self.str_w(w_obj)
def unicode_w(self, w_obj):
@@ -1608,16 +1600,16 @@
from rpython.rlib import rstring
result = w_obj.unicode_w(self)
if u'\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a unicode string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a unicode string without NUL "
+ "characters")
return rstring.assert_str0(result)
def realunicode_w(self, w_obj):
# Like unicode_w, but only works if w_obj is really of type
# 'unicode'.
if not self.isinstance_w(w_obj, self.w_unicode):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a unicode'))
+ raise oefmt(self.w_TypeError, "argument must be a unicode")
return self.unicode_w(w_obj)
def bool_w(self, w_obj):
@@ -1636,8 +1628,8 @@
def gateway_r_uint_w(self, w_obj):
if self.isinstance_w(w_obj, self.w_float):
- raise OperationError(self.w_TypeError,
- self.wrap("integer argument expected, got float"))
+ raise oefmt(self.w_TypeError,
+ "integer argument expected, got float")
return self.uint_w(self.int(w_obj))
def gateway_nonnegint_w(self, w_obj):
@@ -1645,8 +1637,7 @@
# the integer is negative. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
return value
def c_int_w(self, w_obj):
@@ -1654,8 +1645,7 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < INT_MIN or value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_uint_w(self, w_obj):
@@ -1663,8 +1653,8 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.uint_w(w_obj)
if value > UINT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected an unsigned 32-bit integer"))
+ raise oefmt(self.w_OverflowError,
+ "expected an unsigned 32-bit integer")
return value
def c_nonnegint_w(self, w_obj):
@@ -1673,11 +1663,9 @@
# for gateway.py.
value = self.int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
if value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_short_w(self, w_obj):
@@ -1733,17 +1721,15 @@
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
except OperationError, e:
if e.match(self, self.w_AttributeError):
- raise OperationError(self.w_TypeError,
- self.wrap("argument must be an int, or have a fileno() "
- "method.")
- )
+ raise oefmt(self.w_TypeError,
+ "argument must be an int, or have a fileno() "
+ "method.")
raise
w_fd = self.call_function(w_fileno)
if (not self.isinstance_w(w_fd, self.w_int) and
not self.isinstance_w(w_fd, self.w_long)):
- raise OperationError(self.w_TypeError,
- self.wrap("fileno() returned a non-integer")
- )
+ raise oefmt(self.w_TypeError,
+ "fileno() returned a non-integer")
try:
fd = self.c_int_w(w_fd)
except OperationError, e:
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -214,9 +214,8 @@
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("instance exception may not "
- "have a separate value"))
+ raise oefmt(space.w_TypeError,
+ "instance exception may not have a separate value")
w_value = w_inst
w_type = w_instclass
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -202,16 +202,15 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting function's dictionary to a non-dict")
- )
+ raise oefmt(space.w_TypeError,
+ "setting function's dictionary to a non-dict")
self.w_func_dict = w_dict
def descr_function__new__(space, w_subtype, w_code, w_globals,
w_name=None, w_argdefs=None, w_closure=None):
code = space.interp_w(Code, w_code)
if not space.isinstance_w(w_globals, space.w_dict):
- raise OperationError(space.w_TypeError, space.wrap("expected dict"))
+ raise oefmt(space.w_TypeError, "expected dict")
if not space.is_none(w_name):
name = space.str_w(w_name)
else:
@@ -227,15 +226,15 @@
if space.is_none(w_closure) and nfreevars == 0:
closure = None
elif not space.is_w(space.type(w_closure), space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("invalid closure"))
+ raise oefmt(space.w_TypeError, "invalid closure")
else:
from pypy.interpreter.nestedscope import Cell
closure_w = space.unpackiterable(w_closure)
n = len(closure_w)
if nfreevars == 0:
- raise OperationError(space.w_ValueError, space.wrap("no closure needed"))
+ raise oefmt(space.w_ValueError, "no closure needed")
elif nfreevars != n:
- raise OperationError(space.w_ValueError, space.wrap("closure is wrong size"))
+ raise oefmt(space.w_ValueError, "closure is wrong size")
closure = [space.interp_w(Cell, w_cell) for w_cell in closure_w]
func = space.allocate_instance(Function, w_subtype)
Function.__init__(func, space, code, w_globals, defs_w, closure, name)
@@ -321,8 +320,8 @@
w_func_dict, w_module) = args_w
except ValueError:
# wrong args
- raise OperationError(space.w_ValueError,
- space.wrap("Wrong arguments to function.__setstate__"))
+ raise oefmt(space.w_ValueError,
+ "Wrong arguments to function.__setstate__")
self.space = space
self.name = space.str_w(w_name)
@@ -359,7 +358,8 @@
self.defs_w = []
return
if not space.isinstance_w(w_defaults, space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("func_defaults must be set to a tuple object or None"))
+ raise oefmt(space.w_TypeError,
+ "func_defaults must be set to a tuple object or None")
self.defs_w = space.fixedview(w_defaults)
def fdel_func_defaults(self, space):
@@ -380,8 +380,8 @@
if space.isinstance_w(w_name, space.w_str):
self.name = space.str_w(w_name)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("__name__ must be set to a string object"))
+ raise oefmt(space.w_TypeError,
+ "__name__ must be set to a string object")
def fdel_func_doc(self, space):
self.w_doc = space.w_None
@@ -406,8 +406,8 @@
def fset_func_code(self, space, w_code):
from pypy.interpreter.pycode import PyCode
if not self.can_change_code:
- raise OperationError(space.w_TypeError,
- space.wrap("Cannot change code attribute of builtin functions"))
+ raise oefmt(space.w_TypeError,
+ "Cannot change code attribute of builtin functions")
code = space.interp_w(Code, w_code)
closure_len = 0
if self.closure:
@@ -457,8 +457,7 @@
if space.is_w(w_instance, space.w_None):
w_instance = None
if w_instance is None and space.is_none(w_class):
- raise OperationError(space.w_TypeError,
- space.wrap("unbound methods must have class"))
+ raise oefmt(space.w_TypeError, "unbound methods must have class")
method = space.allocate_instance(Method, w_subtype)
Method.__init__(method, space, w_function, w_instance, w_class)
return space.wrap(method)
@@ -659,8 +658,8 @@
self.w_module = func.w_module
def descr_builtinfunction__new__(space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("cannot create 'builtin_function' instances"))
+ raise oefmt(space.w_TypeError,
+ "cannot create 'builtin_function' instances")
def descr_function_repr(self):
return self.space.wrap('' % (self.name,))
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -21,7 +21,7 @@
from pypy.interpreter.signature import Signature
from pypy.interpreter.baseobjspace import (W_Root, ObjSpace, SpaceCache,
DescrMismatch)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import ClassMethod, FunctionWithFixedCode
from rpython.rlib import rstackovf
from rpython.rlib.objectmodel import we_are_translated
@@ -699,14 +699,13 @@
raise
raise e
except KeyboardInterrupt:
- raise OperationError(space.w_KeyboardInterrupt,
- space.w_None)
+ raise OperationError(space.w_KeyboardInterrupt, space.w_None)
except MemoryError:
raise OperationError(space.w_MemoryError, space.w_None)
except rstackovf.StackOverflow, e:
rstackovf.check_stack_overflow()
- raise OperationError(space.w_RuntimeError,
- space.wrap("maximum recursion depth exceeded"))
+ raise oefmt(space.w_RuntimeError,
+ "maximum recursion depth exceeded")
except RuntimeError: # not on top of py.py
raise OperationError(space.w_RuntimeError, space.w_None)
@@ -762,8 +761,7 @@
try:
w_result = self.fastfunc_0(space)
except DescrMismatch:
- raise OperationError(space.w_SystemError,
- space.wrap("unexpected DescrMismatch error"))
+ raise oefmt(space.w_SystemError, "unexpected DescrMismatch error")
except Exception, e:
self.handle_exception(space, e)
w_result = None
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.pyopcode import LoopBlock
from rpython.rlib import jit
@@ -76,8 +76,7 @@
def _send_ex(self, w_arg, operr):
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None:
# xxx a bit ad-hoc, but we don't want to go inside
@@ -89,8 +88,9 @@
last_instr = jit.promote(frame.last_instr)
if last_instr == -1:
if w_arg and not space.is_w(w_arg, space.w_None):
- msg = "can't send non-None value to a just-started generator"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "can't send non-None value to a just-started "
+ "generator")
else:
if not w_arg:
w_arg = space.w_None
@@ -151,8 +151,8 @@
raise
if w_retval is not None:
- msg = "generator ignored GeneratorExit"
- raise OperationError(space.w_RuntimeError, space.wrap(msg))
+ raise oefmt(space.w_RuntimeError,
+ "generator ignored GeneratorExit")
def descr_gi_frame(self, space):
if self.frame is not None and not self.frame.frame_finished_execution:
@@ -184,8 +184,7 @@
# XXX copied and simplified version of send_ex()
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None: # already finished
return
diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py
--- a/pypy/interpreter/nestedscope.py
+++ b/pypy/interpreter/nestedscope.py
@@ -1,7 +1,7 @@
from rpython.tool.uid import uid
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.mixedmodule import MixedModule
@@ -78,4 +78,4 @@
try:
return self.get()
except ValueError:
- raise OperationError(space.w_ValueError, space.wrap("Cell is empty"))
+ raise oefmt(space.w_ValueError, "Cell is empty")
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -8,7 +8,7 @@
from pypy.interpreter import eval
from pypy.interpreter.signature import Signature
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.astcompiler.consts import (
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED,
@@ -374,14 +374,13 @@
lnotab, w_freevars=None, w_cellvars=None,
magic=default_magic):
if argcount < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: argcount must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: argcount must not be negative")
if nlocals < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: nlocals must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: nlocals must not be negative")
if not space.isinstance_w(w_constants, space.w_tuple):
- raise OperationError(space.w_TypeError,
- space.wrap("Expected tuple for constants"))
+ raise oefmt(space.w_TypeError, "Expected tuple for constants")
consts_w = space.fixedview(w_constants)
names = unpack_str_tuple(space, w_names)
varnames = unpack_str_tuple(space, w_varnames)
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -7,7 +7,7 @@
from pypy.interpreter.pyparser import future, pyparse, error as parseerror
from pypy.interpreter.astcompiler import (astbuilder, codegen, consts, misc,
optimize, ast)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
class AbstractCompiler(object):
@@ -116,8 +116,7 @@
else:
check = True
if not check:
- raise OperationError(self.space.w_TypeError, self.space.wrap(
- "invalid node type"))
+ raise oefmt(self.space.w_TypeError, "invalid node type")
fut = misc.parse_future(node, self.future_flags.compiler_features)
f_flags, f_lineno, f_col = fut
@@ -132,8 +131,7 @@
mod = optimize.optimize_ast(space, node, info)
code = codegen.compile_ast(space, mod, info)
except parseerror.SyntaxError, e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return code
def compile_to_ast(self, source, filename, mode, flags):
@@ -146,11 +144,9 @@
parse_tree = self.parser.parse_source(source, info)
mod = astbuilder.ast_from_node(space, parse_tree, info)
except parseerror.IndentationError, e:
- raise OperationError(space.w_IndentationError,
- e.wrap_info(space))
+ raise OperationError(space.w_IndentationError, e.wrap_info(space))
except parseerror.SyntaxError, e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return mod
def compile(self, source, filename, mode, flags, hidden_applevel=False):
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -220,9 +220,9 @@
return # no cells needed - fast path
elif outer_func is None:
space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("directly executed code object "
- "may not contain free variables"))
+ raise oefmt(space.w_TypeError,
+ "directly executed code object may not contain free "
+ "variables")
if outer_func and outer_func.closure:
closure_size = len(outer_func.closure)
else:
@@ -513,7 +513,7 @@
self.locals_cells_stack_w = values_w[:]
valuestackdepth = space.int_w(w_stackdepth)
if not self._check_stack_index(valuestackdepth):
- raise OperationError(space.w_ValueError, space.wrap("invalid stackdepth"))
+ raise oefmt(space.w_ValueError, "invalid stackdepth")
assert valuestackdepth >= 0
self.valuestackdepth = valuestackdepth
if space.is_w(w_exc_value, space.w_None):
@@ -686,12 +686,11 @@
try:
new_lineno = space.int_w(w_new_lineno)
except OperationError:
- raise OperationError(space.w_ValueError,
- space.wrap("lineno must be an integer"))
+ raise oefmt(space.w_ValueError, "lineno must be an integer")
if self.get_w_f_trace() is None:
- raise OperationError(space.w_ValueError,
- space.wrap("f_lineno can only be set by a trace function."))
+ raise oefmt(space.w_ValueError,
+ "f_lineno can only be set by a trace function.")
line = self.pycode.co_firstlineno
if new_lineno < line:
@@ -718,8 +717,8 @@
# Don't jump to a line with an except in it.
code = self.pycode.co_code
if ord(code[new_lasti]) in (DUP_TOP, POP_TOP):
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump to 'except' line as there's no exception"))
+ raise oefmt(space.w_ValueError,
+ "can't jump to 'except' line as there's no exception")
# Don't jump into or out of a finally block.
f_lasti_setup_addr = -1
@@ -800,8 +799,8 @@
new_iblock = f_iblock - delta_iblock
if new_iblock > min_iblock:
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump into the middle of a block"))
+ raise oefmt(space.w_ValueError,
+ "can't jump into the middle of a block")
while f_iblock > new_iblock:
block = self.pop_block()
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -253,8 +253,7 @@
def unknown_objclass_getter(space):
# NB. this is an AttributeError to make inspect.py happy
- raise OperationError(space.w_AttributeError,
- space.wrap("generic property has no __objclass__"))
+ raise oefmt(space.w_AttributeError, "generic property has no __objclass__")
@specialize.arg(0)
def make_objclass_getter(tag, func, cls):
@@ -328,8 +327,7 @@
Change the value of the property of the given obj."""
fset = self.fset
if fset is None:
- raise OperationError(space.w_TypeError,
- space.wrap("readonly attribute"))
+ raise oefmt(space.w_TypeError, "readonly attribute")
try:
fset(self, space, w_obj, w_value)
except DescrMismatch:
@@ -344,8 +342,7 @@
Delete the value of the property from the given obj."""
fdel = self.fdel
if fdel is None:
- raise OperationError(space.w_AttributeError,
- space.wrap("cannot delete attribute"))
+ raise oefmt(space.w_AttributeError, "cannot delete attribute")
try:
fdel(self, space, w_obj)
except DescrMismatch:
From pypy.commits at gmail.com Mon May 2 01:12:47 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 22:12:47 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: fix test, this is lazy now
Message-ID: <5726e1cf.d2aa1c0a.1ecec.ffffd5b4@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84118:2faccce3d0dd
Date: 2016-05-01 22:09 -0700
http://bitbucket.org/pypy/pypy/changeset/2faccce3d0dd/
Log: fix test, this is lazy now
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -348,7 +348,7 @@
excinfo = py.test.raises(OperationError, Arguments, space, [],
["a"], [1], w_starstararg={None: 1})
assert excinfo.value.w_type is TypeError
- assert excinfo.value._w_value is not None
+ assert excinfo.value._w_value is None
excinfo = py.test.raises(OperationError, Arguments, space, [],
["a"], [1], w_starstararg={valuedummy: 1})
assert excinfo.value.w_type is ValueError
From pypy.commits at gmail.com Mon May 2 01:12:49 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 22:12:49 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: oefmt pypy/{objspace,tool}/
Message-ID: <5726e1d1.878d1c0a.ed012.45f0@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84119:b974474cf57b
Date: 2016-05-01 22:09 -0700
http://bitbucket.org/pypy/pypy/changeset/b974474cf57b/
Log: oefmt pypy/{objspace,tool}/
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -247,8 +247,8 @@
if space.is_w(w_restype, space.w_int):
return space.int_w(w_res) != 0
else:
- msg = "__nonzero__ should return bool or integer"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "__nonzero__ should return bool or integer")
def nonzero(space, w_obj):
if space.is_true(w_obj):
@@ -282,8 +282,7 @@
w_iter = space.get_and_call_function(w_descr, w_obj)
w_next = space.lookup(w_iter, 'next')
if w_next is None:
- raise OperationError(space.w_TypeError,
- space.wrap("iter() returned non-iterator"))
+ raise oefmt(space.w_TypeError, "iter() returned non-iterator")
return w_iter
def next(space, w_obj):
@@ -382,8 +381,7 @@
if _check_notimplemented(space, w_res):
return w_res
- raise OperationError(space.w_TypeError,
- space.wrap("operands do not support **"))
+ raise oefmt(space.w_TypeError, "operands do not support **")
def inplace_pow(space, w_lhs, w_rhs):
w_impl = space.lookup(w_lhs, '__ipow__')
@@ -439,8 +437,8 @@
bigint = space.bigint_w(w_result)
return space.wrap(bigint.hash())
else:
- raise OperationError(space.w_TypeError,
- space.wrap("__hash__() should return an int or long"))
+ raise oefmt(space.w_TypeError,
+ "__hash__() should return an int or long")
def userdel(space, w_obj):
w_del = space.lookup(w_obj, '__del__')
@@ -469,8 +467,7 @@
def coerce(space, w_obj1, w_obj2):
w_res = space.try_coerce(w_obj1, w_obj2)
if w_res is None:
- raise OperationError(space.w_TypeError,
- space.wrap("coercion failed"))
+ raise oefmt(space.w_TypeError, "coercion failed")
return w_res
def try_coerce(space, w_obj1, w_obj2):
@@ -494,13 +491,13 @@
return None
if (not space.isinstance_w(w_res, space.w_tuple) or
space.len_w(w_res) != 2):
- raise OperationError(space.w_TypeError,
- space.wrap("coercion should return None or 2-tuple"))
+ raise oefmt(space.w_TypeError,
+ "coercion should return None or 2-tuple")
w_res = space.newtuple([space.getitem(w_res, space.wrap(1)), space.getitem(w_res, space.wrap(0))])
elif (not space.isinstance_w(w_res, space.w_tuple) or
space.len_w(w_res) != 2):
- raise OperationError(space.w_TypeError,
- space.wrap("coercion should return None or 2-tuple"))
+ raise oefmt(space.w_TypeError,
+ "coercion should return None or 2-tuple")
return w_res
def issubtype(space, w_sub, w_type):
@@ -517,8 +514,7 @@
def issubtype_allow_override(space, w_sub, w_type):
w_check = space.lookup(w_type, "__subclasscheck__")
if w_check is None:
- raise OperationError(space.w_TypeError,
- space.wrap("issubclass not supported here"))
+ raise oefmt(space.w_TypeError, "issubclass not supported here")
return space.get_and_call_function(w_check, w_type, w_sub)
def isinstance_allow_override(space, w_inst, w_type):
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -446,8 +446,8 @@
return StringBuffer(self._value)
def writebuf_w(self, space):
- raise OperationError(space.w_TypeError, space.wrap(
- "Cannot use string as modifiable buffer"))
+ raise oefmt(space.w_TypeError,
+ "Cannot use string as modifiable buffer")
charbuf_w = str_w
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -41,7 +41,8 @@
if space.is_w(space.type(w_key), space.w_str):
self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
else:
- raise OperationError(space.w_TypeError, space.wrap("cannot add non-string keys to dict of a type"))
+ raise oefmt(space.w_TypeError,
+ "cannot add non-string keys to dict of a type")
def setitem_str(self, w_dict, key, w_value):
w_type = self.unerase(w_dict.dstorage)
diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py
--- a/pypy/objspace/std/formatting.py
+++ b/pypy/objspace/std/formatting.py
@@ -28,27 +28,24 @@
try:
w_result = self.values_w[self.values_pos]
except IndexError:
- space = self.space
- raise OperationError(space.w_TypeError, space.wrap(
- 'not enough arguments for format string'))
+ raise oefmt(self.space.w_TypeError,
+ "not enough arguments for format string")
else:
self.values_pos += 1
return w_result
def checkconsumed(self):
if self.values_pos < len(self.values_w) and self.w_valuedict is None:
- space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap('not all arguments converted '
- 'during string formatting'))
+ raise oefmt(self.space.w_TypeError,
+ "not all arguments converted during string formatting")
def std_wp_int(self, r, prefix='', keep_zero=False):
# use self.prec to add some '0' on the left of the number
if self.prec >= 0:
if self.prec > 1000:
- raise OperationError(
- self.space.w_OverflowError, self.space.wrap(
- 'formatted integer is too long (precision too large?)'))
+ raise oefmt(self.space.w_OverflowError,
+ "formatted integer is too long (precision too "
+ "large?)")
sign = r[0] == '-'
padding = self.prec - (len(r)-int(sign))
if padding > 0:
@@ -170,9 +167,7 @@
try:
return self.fmt[self.fmtpos]
except IndexError:
- space = self.space
- raise OperationError(space.w_ValueError,
- space.wrap("incomplete format"))
+ raise oefmt(self.space.w_ValueError, "incomplete format")
# Only shows up if we've already started inlining format(), so just
# unconditionally unroll this.
@@ -188,8 +183,7 @@
c = fmt[i]
except IndexError:
space = self.space
- raise OperationError(space.w_ValueError,
- space.wrap("incomplete format key"))
+ raise oefmt(space.w_ValueError, "incomplete format key")
if c == ')':
pcount -= 1
if pcount == 0:
@@ -204,8 +198,7 @@
# return the value corresponding to a key in the input dict
space = self.space
if self.w_valuedict is None:
- raise OperationError(space.w_TypeError,
- space.wrap("format requires a mapping"))
+ raise oefmt(space.w_TypeError, "format requires a mapping")
w_key = space.wrap(key)
return space.getitem(self.w_valuedict, w_key)
@@ -347,9 +340,9 @@
s = space.str_w(w_s)
else:
s = c
- msg = "unsupported format character '%s' (0x%x) at index %d" % (
- s, ord(c), self.fmtpos - 1)
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "unsupported format character '%s' (%s) at index %d",
+ s, hex(ord(c)), self.fmtpos - 1)
def std_wp(self, r):
length = len(r)
@@ -434,9 +427,8 @@
space = self.space
w_impl = space.lookup(w_value, '__str__')
if w_impl is None:
- raise OperationError(space.w_TypeError,
- space.wrap("operand does not support "
- "unary str"))
+ raise oefmt(space.w_TypeError,
+ "operand does not support unary str")
w_result = space.get_and_call_function(w_impl, w_value)
if space.isinstance_w(w_result,
space.w_unicode):
@@ -469,16 +461,14 @@
if space.isinstance_w(w_value, space.w_str):
s = space.str_w(w_value)
if len(s) != 1:
- raise OperationError(space.w_TypeError,
- space.wrap("%c requires int or char"))
+ raise oefmt(space.w_TypeError, "%c requires int or char")
self.std_wp(s)
elif space.isinstance_w(w_value, space.w_unicode):
if not do_unicode:
raise NeedUnicodeFormattingError
ustr = space.unicode_w(w_value)
if len(ustr) != 1:
- raise OperationError(space.w_TypeError,
- space.wrap("%c requires int or unichar"))
+ raise oefmt(space.w_TypeError, "%c requires int or unichar")
self.std_wp(ustr)
else:
n = space.int_w(w_value)
@@ -486,15 +476,15 @@
try:
c = unichr(n)
except ValueError:
- raise OperationError(space.w_OverflowError,
- space.wrap("unicode character code out of range"))
+ raise oefmt(space.w_OverflowError,
+ "unicode character code out of range")
self.std_wp(c)
else:
try:
s = chr(n)
- except ValueError: # chr(out-of-range)
- raise OperationError(space.w_OverflowError,
- space.wrap("character code not in range(256)"))
+ except ValueError:
+ raise oefmt(space.w_OverflowError,
+ "character code not in range(256)")
self.std_wp(s)
return StringFormatter
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -566,8 +566,7 @@
index = space.getindex_w(w_index, space.w_IndexError, "list index")
return self.getitem(index)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
def descr_getslice(self, space, w_start, w_stop):
length = self.length()
@@ -594,8 +593,7 @@
try:
self.setitem(idx, w_any)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
def descr_setslice(self, space, w_start, w_stop, w_iterable):
length = self.length()
@@ -621,8 +619,7 @@
try:
self.pop(idx)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
def descr_delslice(self, space, w_start, w_stop):
length = self.length()
@@ -662,8 +659,7 @@
index (default last)'''
length = self.length()
if length == 0:
- raise OperationError(space.w_IndexError,
- space.wrap("pop from empty list"))
+ raise oefmt(space.w_IndexError, "pop from empty list")
# clearly differentiate between list.pop() and list.pop(index)
if index == -1:
return self.pop_end() # cannot raise because list is not empty
@@ -672,8 +668,7 @@
try:
return self.pop(index)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("pop index out of range"))
+ raise oefmt(space.w_IndexError, "pop index out of range")
def descr_remove(self, space, w_value):
'L.remove(value) -- remove first occurrence of value'
@@ -769,8 +764,7 @@
self.__init__(space, sorter.list)
if mucked:
- raise OperationError(space.w_ValueError,
- space.wrap("list modified during sort"))
+ raise oefmt(space.w_ValueError, "list modified during sort")
find_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'list.find')
@@ -1489,14 +1483,15 @@
def setslice(self, w_list, start, step, slicelength, w_other):
assert slicelength >= 0
+ space = self.space
- if self is self.space.fromcache(ObjectListStrategy):
+ if self is space.fromcache(ObjectListStrategy):
w_other = w_other._temporarily_as_objects()
elif not self.list_is_correct_type(w_other) and w_other.length() != 0:
w_list.switch_to_object_strategy()
w_other_as_object = w_other._temporarily_as_objects()
assert (w_other_as_object.strategy is
- self.space.fromcache(ObjectListStrategy))
+ space.fromcache(ObjectListStrategy))
w_list.setslice(start, step, slicelength, w_other_as_object)
return
@@ -1522,7 +1517,7 @@
assert start >= 0
del items[start:start + delta]
elif len2 != slicelength: # No resize for extended slices
- raise oefmt(self.space.w_ValueError,
+ raise oefmt(space.w_ValueError,
"attempt to assign sequence of size %d to extended "
"slice of size %d", len2, slicelength)
@@ -2120,8 +2115,8 @@
result = space.int_w(w_result)
except OperationError, e:
if e.match(space, space.w_TypeError):
- raise OperationError(space.w_TypeError,
- space.wrap("comparison function must return int"))
+ raise oefmt(space.w_TypeError,
+ "comparison function must return int")
raise
return result < 0
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -563,12 +563,11 @@
@objectmodel.dont_inline
def _obj_setdict(self, space, w_dict):
- from pypy.interpreter.error import OperationError
+ from pypy.interpreter.error import oefmt
terminator = self._get_mapdict_map().terminator
assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator)
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting dictionary to a non-dict"))
+ raise oefmt(space.w_TypeError, "setting dictionary to a non-dict")
assert isinstance(w_dict, W_DictMultiObject)
w_olddict = self.getdict(space)
assert isinstance(w_olddict, W_DictMultiObject)
diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py
--- a/pypy/objspace/std/newformat.py
+++ b/pypy/objspace/std/newformat.py
@@ -63,8 +63,7 @@
else:
out = rstring.StringBuilder()
if not level:
- raise OperationError(space.w_ValueError,
- space.wrap("Recursion depth exceeded"))
+ raise oefmt(space.w_ValueError, "Recursion depth exceeded")
level -= 1
s = self.template
return self._do_build_string(start, end, level, out, s)
@@ -82,14 +81,12 @@
markup_follows = True
if c == "}":
if at_end or s[i] != "}":
- raise OperationError(space.w_ValueError,
- space.wrap("Single '}'"))
+ raise oefmt(space.w_ValueError, "Single '}'")
i += 1
markup_follows = False
if c == "{":
if at_end:
- raise OperationError(space.w_ValueError,
- space.wrap("Single '{'"))
+ raise oefmt(space.w_ValueError, "Single '{'")
if s[i] == "{":
i += 1
markup_follows = False
@@ -121,8 +118,7 @@
break
i += 1
if nested:
- raise OperationError(space.w_ValueError,
- space.wrap("Unmatched '{'"))
+ raise oefmt(space.w_ValueError, "Unmatched '{'")
rendered = self._render_field(field_start, i, recursive, level)
out.append(rendered)
i += 1
@@ -144,16 +140,15 @@
if c == "!":
i += 1
if i == end:
- w_msg = self.space.wrap("expected conversion")
- raise OperationError(self.space.w_ValueError, w_msg)
+ raise oefmt(self.space.w_ValueError,
+ "expected conversion")
conversion = s[i]
i += 1
if i < end:
if s[i] != ':':
- w_msg = self.space.wrap("expected ':' after"
- " format specifier")
- raise OperationError(self.space.w_ValueError,
- w_msg)
+ raise oefmt(self.space.w_ValueError,
+ "expected ':' after format "
+ "specifier")
i += 1
else:
conversion = None
@@ -189,13 +184,12 @@
if use_numeric:
if self.auto_numbering_state == ANS_MANUAL:
if empty:
- msg = "switching from manual to automatic numbering"
- raise OperationError(space.w_ValueError,
- space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "switching from manual to automatic "
+ "numbering")
elif not empty:
- msg = "switching from automatic to manual numbering"
- raise OperationError(space.w_ValueError,
- space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "switching from automatic to manual numbering")
if empty:
index = self.auto_numbering
self.auto_numbering += 1
@@ -217,8 +211,7 @@
try:
w_arg = self.args[index]
except IndexError:
- w_msg = space.wrap("index out of range")
- raise OperationError(space.w_IndexError, w_msg)
+ raise oefmt(space.w_IndexError, "out of range")
return self._resolve_lookups(w_arg, name, i, end)
@jit.unroll_safe
@@ -237,8 +230,8 @@
break
i += 1
if start == i:
- w_msg = space.wrap("Empty attribute in format string")
- raise OperationError(space.w_ValueError, w_msg)
+ raise oefmt(space.w_ValueError,
+ "Empty attribute in format string")
w_attr = space.wrap(name[start:i])
if w_obj is not None:
w_obj = space.getattr(w_obj, w_attr)
@@ -256,8 +249,7 @@
break
i += 1
if not got_bracket:
- raise OperationError(space.w_ValueError,
- space.wrap("Missing ']'"))
+ raise oefmt(space.w_ValueError, "Missing ']'")
index, reached = _parse_int(self.space, name, start, i)
if index != -1 and reached == i:
w_item = space.wrap(index)
@@ -270,8 +262,8 @@
self.parser_list_w.append(space.newtuple([
space.w_False, w_item]))
else:
- msg = "Only '[' and '.' may follow ']'"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Only '[' and '.' may follow ']'")
return w_obj
def formatter_field_name_split(self):
@@ -311,8 +303,7 @@
return space.call_function(space.w_unicode, w_obj)
return space.str(w_obj)
else:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("invalid conversion"))
+ raise oefmt(space.w_ValueError, "invalid conversion")
def _render_field(self, start, end, recursive, level):
name, conversion, spec_start = self._parse_field(start, end)
@@ -471,19 +462,17 @@
i += 1
self._precision, i = _parse_int(self.space, spec, i, length)
if self._precision == -1:
- raise OperationError(space.w_ValueError,
- space.wrap("no precision given"))
+ raise oefmt(space.w_ValueError, "no precision given")
if length - i > 1:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid format spec"))
+ raise oefmt(space.w_ValueError, "invalid format spec")
if length - i == 1:
presentation_type = spec[i]
if self.is_unicode:
try:
the_type = spec[i].encode("ascii")[0]
except UnicodeEncodeError:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid presentation type"))
+ raise oefmt(space.w_ValueError,
+ "invalid presentation type")
else:
the_type = presentation_type
i += 1
@@ -502,8 +491,7 @@
# ok
pass
else:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid type with ','"))
+ raise oefmt(space.w_ValueError, "invalid type with ','")
return False
def _calc_padding(self, string, length):
@@ -546,9 +534,8 @@
return rstring.StringBuilder()
def _unknown_presentation(self, tp):
- msg = "unknown presentation for %s: '%s'"
- w_msg = self.space.wrap(msg % (tp, self._type))
- raise OperationError(self.space.w_ValueError, w_msg)
+ raise oefmt(self.space.w_ValueError,
+ "unknown presentation for %s: '%s'", tp, self._type)
def format_string(self, string):
space = self.space
@@ -557,14 +544,16 @@
if self._type != "s":
self._unknown_presentation("string")
if self._sign != "\0":
- msg = "Sign not allowed in string format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Sign not allowed in string format specifier")
if self._alternate:
- msg = "Alternate form (#) not allowed in string format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Alternate form (#) not allowed in string format "
+ "specifier")
if self._align == "=":
- msg = "'=' alignment not allowed in string format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "'=' alignment not allowed in string format "
+ "specifier")
length = len(string)
precision = self._precision
if precision != -1 and length >= precision:
@@ -762,14 +751,14 @@
def _format_int_or_long(self, w_num, kind):
space = self.space
if self._precision != -1:
- msg = "precision not allowed in integer type"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "precision not allowed in integer type")
sign_char = "\0"
tp = self._type
if tp == "c":
if self._sign != "\0":
- msg = "sign not allowed with 'c' presentation type"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "sign not allowed with 'c' presentation type")
value = space.int_w(w_num)
if self.is_unicode:
result = runicode.UNICHR(value)
@@ -920,8 +909,8 @@
flags = 0
default_precision = 6
if self._alternate:
- msg = "Alternate form (#) not allowed in float formats"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Alternate form (#) not allowed in float formats")
tp = self._type
self._get_locale(tp)
if tp == "\0":
@@ -989,18 +978,19 @@
default_precision = 6
if self._align == "=":
# '=' alignment is invalid
- msg = ("'=' alignment flag is not allowed in"
- " complex format specifier")
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "'=' alignment flag is not allowed in complex "
+ "format specifier")
if self._fill_char == "0":
- #zero padding is invalid
- msg = "Zero padding is not allowed in complex format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ # zero padding is invalid
+ raise oefmt(space.w_ValueError,
+ "Zero padding is not allowed in complex format "
+ "specifier")
if self._alternate:
- #alternate is invalid
- msg = "Alternate form (#) not allowed in complex format specifier"
- raise OperationError(space.w_ValueError,
- space.wrap(msg))
+ # alternate is invalid
+ raise oefmt(space.w_ValueError,
+ "Alternate form (#) not allowed in complex format "
+ "specifier")
skip_re = 0
add_parens = 0
if tp == "\0":
diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py
--- a/pypy/objspace/std/objectobject.py
+++ b/pypy/objspace/std/objectobject.py
@@ -198,8 +198,7 @@
elif space.isinstance_w(w_format_spec, space.w_str):
w_as_str = space.str(w_obj)
else:
- msg = "format_spec must be a string"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "format_spec must be a string")
if space.len_w(w_format_spec) > 0:
msg = "object.__format__ with a non-empty format string is deprecated"
space.warn(space.wrap(msg), space.w_PendingDeprecationWarning)
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -374,8 +374,8 @@
# one is not
def _wrap_expected_length(self, expected, got):
- return OperationError(self.w_ValueError,
- self.wrap("expected length %d, got %d" % (expected, got)))
+ return oefmt(self.w_ValueError,
+ "expected length %d, got %d", expected, got)
def unpackiterable(self, w_obj, expected_length=-1):
if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj):
@@ -506,8 +506,7 @@
w_tup = self.call_function(w_indices, w_length)
l_w = self.unpackiterable(w_tup)
if not len(l_w) == 3:
- raise OperationError(self.w_ValueError,
- self.wrap("Expected tuple of length 3"))
+ raise oefmt(self.w_ValueError, "Expected tuple of length 3")
return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2])
_DescrOperation_is_true = is_true
@@ -613,13 +612,12 @@
def _type_issubtype(self, w_sub, w_type):
if isinstance(w_sub, W_TypeObject) and isinstance(w_type, W_TypeObject):
return self.wrap(w_sub.issubtype(w_type))
- raise OperationError(self.w_TypeError, self.wrap("need type objects"))
+ raise oefmt(self.w_TypeError, "need type objects")
@specialize.arg_or_var(2)
def _type_isinstance(self, w_inst, w_type):
if not isinstance(w_type, W_TypeObject):
- raise OperationError(self.w_TypeError,
- self.wrap("need type object"))
+ raise oefmt(self.w_TypeError, "need type object")
if is_annotation_constant(w_type):
cls = self._get_interplevel_cls(w_type)
if cls is not None:
diff --git a/pypy/objspace/std/proxyobject.py b/pypy/objspace/std/proxyobject.py
--- a/pypy/objspace/std/proxyobject.py
+++ b/pypy/objspace/std/proxyobject.py
@@ -1,7 +1,7 @@
""" transparent list implementation
"""
from pypy.interpreter import baseobjspace
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
def transparent_class(name, BaseCls):
@@ -20,8 +20,9 @@
return self.w_type
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("You cannot override __class__ for transparent proxies"))
+ raise oefmt(space.w_TypeError,
+ "You cannot override __class__ for transparent "
+ "proxies")
def getdictvalue(self, space, attr):
try:
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py
--- a/pypy/objspace/std/setobject.py
+++ b/pypy/objspace/std/setobject.py
@@ -1,6 +1,6 @@
from pypy.interpreter import gateway
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.signature import Signature
from pypy.interpreter.typedef import TypeDef
from pypy.objspace.std.bytesobject import W_BytesObject
@@ -173,8 +173,7 @@
def descr_cmp(self, space, w_other):
if space.is_w(space.type(self), space.type(w_other)):
# hack hack until we get the expected result
- raise OperationError(space.w_TypeError,
- space.wrap('cannot compare sets using cmp()'))
+ raise oefmt(space.w_TypeError, "cannot compare sets using cmp()")
else:
return space.w_NotImplemented
@@ -840,8 +839,7 @@
return EmptyIteratorImplementation(self.space, self, w_set)
def popitem(self, w_set):
- raise OperationError(self.space.w_KeyError,
- self.space.wrap('pop from an empty set'))
+ raise oefmt(self.space.w_KeyError, "pop from an empty set")
class AbstractUnwrappedSetStrategy(object):
@@ -1198,8 +1196,7 @@
result = storage.popitem()
except KeyError:
# strategy may still be the same even if dict is empty
- raise OperationError(self.space.w_KeyError,
- self.space.wrap('pop from an empty set'))
+ raise oefmt(self.space.w_KeyError, "pop from an empty set")
return self.wrap(result[0])
@@ -1421,8 +1418,8 @@
return None
if self.len != self.setimplementation.length():
self.len = -1 # Make this error state sticky
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("set changed size during iteration"))
+ raise oefmt(self.space.w_RuntimeError,
+ "set changed size during iteration")
# look for the next entry
if self.pos < self.len:
result = self.next_entry()
@@ -1435,8 +1432,8 @@
# We try to explicitly look it up in the set.
if not self.setimplementation.has_key(result):
self.len = -1 # Make this error state sticky
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("dictionary changed during iteration"))
+ raise oefmt(self.space.w_RuntimeError,
+ "dictionary changed during iteration")
return result
# no more entries
self.setimplementation = None
diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py
--- a/pypy/objspace/std/sliceobject.py
+++ b/pypy/objspace/std/sliceobject.py
@@ -3,7 +3,7 @@
import sys
from pypy.interpreter import gateway
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import GetSetProperty, TypeDef
from rpython.rlib.objectmodel import specialize
from rpython.rlib import jit
@@ -29,8 +29,7 @@
else:
step = _eval_slice_index(space, w_slice.w_step)
if step == 0:
- raise OperationError(space.w_ValueError,
- space.wrap("slice step cannot be zero"))
+ raise oefmt(space.w_ValueError, "slice step cannot be zero")
if space.is_w(w_slice.w_start, space.w_None):
if step < 0:
start = length - 1
@@ -98,11 +97,9 @@
elif len(args_w) == 3:
w_start, w_stop, w_step = args_w
elif len(args_w) > 3:
- raise OperationError(space.w_TypeError,
- space.wrap("slice() takes at most 3 arguments"))
+ raise oefmt(space.w_TypeError, "slice() takes at most 3 arguments")
else:
- raise OperationError(space.w_TypeError,
- space.wrap("slice() takes at least 1 argument"))
+ raise oefmt(space.w_TypeError, "slice() takes at least 1 argument")
w_obj = space.allocate_instance(W_SliceObject, w_slicetype)
W_SliceObject.__init__(w_obj, w_start, w_stop, w_step)
return w_obj
@@ -166,8 +163,7 @@
def fget(space, w_obj):
from pypy.objspace.std.sliceobject import W_SliceObject
if not isinstance(w_obj, W_SliceObject):
- raise OperationError(space.w_TypeError,
- space.wrap("descriptor is for 'slice'"))
+ raise oefmt(space.w_TypeError, "descriptor is for 'slice'")
return getattr(w_obj, name)
return GetSetProperty(fget)
@@ -200,9 +196,9 @@
except OperationError, err:
if not err.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_TypeError,
- space.wrap("slice indices must be integers or "
- "None or have an __index__ method"))
+ raise oefmt(space.w_TypeError,
+ "slice indices must be integers or None or have an "
+ "__index__ method")
def adapt_lower_bound(space, size, w_index):
index = _eval_slice_index(space, w_index)
diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py
--- a/pypy/objspace/std/specialisedtupleobject.py
+++ b/pypy/objspace/std/specialisedtupleobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.objspace.std.tupleobject import W_AbstractTupleObject
from pypy.objspace.std.util import negate
from rpython.rlib.objectmodel import compute_hash, specialize
@@ -117,8 +117,7 @@
if typetuple[i] != object:
value = space.wrap(value)
return value
- raise OperationError(space.w_IndexError,
- space.wrap("tuple index out of range"))
+ raise oefmt(space.w_IndexError, "tuple index out of range")
cls.__name__ = ('W_SpecialisedTupleObject_' +
''.join([t.__name__[0] for t in typetuple]))
@@ -181,8 +180,7 @@
def specialized_zip_2_lists(space, w_list1, w_list2):
from pypy.objspace.std.listobject import W_ListObject
if type(w_list1) is not W_ListObject or type(w_list2) is not W_ListObject:
- raise OperationError(space.w_TypeError,
- space.wrap("expected two exact lists"))
+ raise oefmt(space.w_TypeError, "expected two exact lists")
if space.config.objspace.std.withspecialisedtuple:
intlist1 = w_list1.getitems_int()
diff --git a/pypy/objspace/std/transparent.py b/pypy/objspace/std/transparent.py
--- a/pypy/objspace/std/transparent.py
+++ b/pypy/objspace/std/transparent.py
@@ -49,7 +49,7 @@
Return something that looks like it is of type typ. Its behaviour is
completely controlled by the controller."""
if not space.is_true(space.callable(w_controller)):
- raise OperationError(space.w_TypeError, space.wrap("controller should be function"))
+ raise oefmt(space.w_TypeError, "controller should be function")
if isinstance(w_type, W_TypeObject):
if space.is_true(space.issubtype(w_type, space.gettypeobject(Function.typedef))):
@@ -65,7 +65,7 @@
if w_type.layout.typedef is space.w_object.layout.typedef:
return W_Transparent(space, w_type, w_controller)
else:
- raise OperationError(space.w_TypeError, space.wrap("type expected as first argument"))
+ raise oefmt(space.w_TypeError, "type expected as first argument")
w_lookup = w_type
for k, v in type_cache.cache:
if w_lookup == k:
diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py
--- a/pypy/objspace/std/tupleobject.py
+++ b/pypy/objspace/std/tupleobject.py
@@ -3,7 +3,7 @@
import sys
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import (
WrappedDefault, interp2app, interpindirect2app, unwrap_spec)
from pypy.interpreter.typedef import TypeDef
@@ -213,8 +213,7 @@
w_item = self.tolist()[i]
if space.eq_w(w_item, w_obj):
return space.wrap(i)
- raise OperationError(space.w_ValueError,
- space.wrap("tuple.index(x): x not in tuple"))
+ raise oefmt(space.w_ValueError, "tuple.index(x): x not in tuple")
W_AbstractTupleObject.typedef = TypeDef(
"tuple",
@@ -326,8 +325,7 @@
try:
return self.wrappeditems[index]
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("tuple index out of range"))
+ raise oefmt(space.w_IndexError, "tuple index out of range")
def wraptuple(space, list_w):
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -1,7 +1,7 @@
import weakref
from pypy.interpreter import gateway
from pypy.interpreter.baseobjspace import W_Root, SpaceCache
-from pypy.interpreter.error import oefmt, OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import Function, StaticMethod
from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\
descr_get_dict, dict_descr, Member, TypeDef
@@ -1240,8 +1240,8 @@
cycle.append(candidate)
cycle.reverse()
names = [cls.getname(space) for cls in cycle]
- raise OperationError(space.w_TypeError, space.wrap(
- "cycle among base classes: " + ' < '.join(names)))
+ raise oefmt(space.w_TypeError,
+ "cycle among base classes: %s", ' < '.join(names))
class TypeCache(SpaceCache):
diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
--- a/pypy/objspace/std/unicodeobject.py
+++ b/pypy/objspace/std/unicodeobject.py
@@ -73,8 +73,8 @@
return StringBuffer(builder.build())
def writebuf_w(self, space):
- raise OperationError(space.w_TypeError, space.wrap(
- "cannot use unicode as modifiable buffer"))
+ raise oefmt(space.w_TypeError,
+ "cannot use unicode as modifiable buffer")
charbuf_w = str_w
diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py
--- a/pypy/tool/pytest/appsupport.py
+++ b/pypy/tool/pytest/appsupport.py
@@ -2,7 +2,7 @@
import py
from pypy.interpreter import gateway, pycode
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
try:
from _pytest.assertion.newinterpret import interpret
@@ -232,9 +232,8 @@
args_w, kwds_w = __args__.unpack()
if space.isinstance_w(w_expr, space.w_str):
if args_w:
- raise OperationError(space.w_TypeError,
- space.wrap("raises() takes no argument "
- "after a string expression"))
+ raise oefmt(space.w_TypeError,
+ "raises() takes no argument after a string expression")
expr = space.unwrap(w_expr)
source = py.code.Source(expr)
frame = space.getexecutioncontext().gettopframe()
@@ -264,8 +263,7 @@
if e.match(space, w_ExpectedException):
return _exc_info(space, e)
raise
- raise OperationError(space.w_AssertionError,
- space.wrap("DID NOT RAISE"))
+ raise oefmt(space.w_AssertionError, "DID NOT RAISE")
app_raises = gateway.interp2app_temp(pypyraises)
From pypy.commits at gmail.com Mon May 2 02:33:03 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 23:33:03 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: oefmt pypy/module/_*
Message-ID: <5726f49f.89cbc20a.a5dd1.3307@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84120:18b5bfbd3dfb
Date: 2016-05-01 22:34 -0700
http://bitbucket.org/pypy/pypy/changeset/18b5bfbd3dfb/
Log: oefmt pypy/module/_*
diff too long, truncating to 2000 out of 2996 lines
diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py
--- a/pypy/module/__builtin__/compiling.py
+++ b/pypy/module/__builtin__/compiling.py
@@ -3,7 +3,7 @@
"""
from pypy.interpreter.pycode import PyCode
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.astcompiler import consts, ast
from pypy.interpreter.gateway import unwrap_spec
@@ -26,8 +26,7 @@
if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST |
consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8 |
consts.PyCF_ACCEPT_NULL_BYTES):
- raise OperationError(space.w_ValueError,
- space.wrap("compile() unrecognized flags"))
+ raise oefmt(space.w_ValueError, "compile() unrecognized flags")
if not dont_inherit:
caller = ec.gettopframe_nohidden()
@@ -35,9 +34,8 @@
flags |= ec.compiler.getcodeflags(caller.getcode())
if mode not in ('exec', 'eval', 'single'):
- raise OperationError(space.w_ValueError,
- space.wrap("compile() arg 3 must be 'exec' "
- "or 'eval' or 'single'"))
+ raise oefmt(space.w_ValueError,
+ "compile() arg 3 must be 'exec' or 'eval' or 'single'")
if space.isinstance_w(w_source, space.gettypeobject(ast.W_AST.typedef)):
ast_node = ast.mod.from_object(space, w_source)
@@ -55,8 +53,8 @@
if not (flags & consts.PyCF_ACCEPT_NULL_BYTES):
if '\x00' in source:
- raise OperationError(space.w_TypeError, space.wrap(
- "compile() expected string without null bytes"))
+ raise oefmt(space.w_TypeError,
+ "compile() expected string without null bytes")
if flags & consts.PyCF_ONLY_AST:
node = ec.compiler.compile_to_ast(source, filename, mode, flags)
@@ -73,8 +71,6 @@
are dictionaries, defaulting to the current current globals and locals.
If only globals is given, locals defaults to it.
"""
- w = space.wrap
-
if (space.isinstance_w(w_code, space.w_str) or
space.isinstance_w(w_code, space.w_unicode)):
w_code = compile(space,
@@ -83,8 +79,8 @@
"", "eval")
if not isinstance(w_code, PyCode):
- raise OperationError(space.w_TypeError,
- w('eval() arg 1 must be a string or code object'))
+ raise oefmt(space.w_TypeError,
+ "eval() arg 1 must be a string or code object")
if space.is_none(w_globals):
caller = space.getexecutioncontext().gettopframe_nohidden()
diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py
--- a/pypy/module/__builtin__/descriptor.py
+++ b/pypy/module/__builtin__/descriptor.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import StaticMethod, ClassMethod
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import (TypeDef, interp_attrproperty_w,
@@ -67,9 +67,9 @@
raise
w_type = w_objtype
if not space.is_true(space.issubtype(w_type, w_starttype)):
- raise OperationError(space.w_TypeError,
- space.wrap("super(type, obj): "
- "obj must be an instance or subtype of type"))
+ raise oefmt(space.w_TypeError,
+ "super(type, obj): obj must be an instance or "
+ "subtype of type")
# XXX the details of how allocate_instance() should be used are not
# really well defined
w_result = space.allocate_instance(W_Super, w_subtype)
@@ -126,21 +126,18 @@
if space.is_w(w_obj, space.w_None):
return space.wrap(self)
if space.is_w(self.w_fget, space.w_None):
- raise OperationError(space.w_AttributeError, space.wrap(
- "unreadable attribute"))
+ raise oefmt(space.w_AttributeError, "unreadable attribute")
return space.call_function(self.w_fget, w_obj)
def set(self, space, w_obj, w_value):
if space.is_w(self.w_fset, space.w_None):
- raise OperationError(space.w_AttributeError, space.wrap(
- "can't set attribute"))
+ raise oefmt(space.w_AttributeError, "can't set attribute")
space.call_function(self.w_fset, w_obj, w_value)
return space.w_None
def delete(self, space, w_obj):
if space.is_w(self.w_fdel, space.w_None):
- raise OperationError(space.w_AttributeError, space.wrap(
- "can't delete attribute"))
+ raise oefmt(space.w_AttributeError, "can't delete attribute")
space.call_function(self.w_fdel, w_obj)
return space.w_None
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -5,7 +5,7 @@
import sys
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import TypeDef
from rpython.rlib import jit, rarithmetic
@@ -32,8 +32,7 @@
# hi-lo-1 = M-(-M-1)-1 = 2*M. Therefore unsigned long has enough
# precision to compute the RHS exactly.
if step == 0:
- raise OperationError(space.w_ValueError,
- space.wrap("step argument must not be zero"))
+ raise oefmt(space.w_ValueError, "step argument must not be zero")
elif step < 0:
lo, hi, step = hi, lo, -step
if lo < hi:
@@ -42,8 +41,7 @@
diff = uhi - ulo - 1
n = intmask(diff // r_uint(step) + 1)
if n < 0:
- raise OperationError(space.w_OverflowError,
- space.wrap("result has too many items"))
+ raise oefmt(space.w_OverflowError, "result has too many items")
else:
n = 0
return n
@@ -63,14 +61,14 @@
w_stop = w_y
if space.isinstance_w(w_stop, space.w_float):
- raise OperationError(space.w_TypeError,
- space.wrap("range() integer end argument expected, got float."))
+ raise oefmt(space.w_TypeError,
+ "range() integer end argument expected, got float.")
if space.isinstance_w(w_start, space.w_float):
- raise OperationError(space.w_TypeError,
- space.wrap("range() integer start argument expected, got float."))
+ raise oefmt(space.w_TypeError,
+ "range() integer start argument expected, got float.")
if space.isinstance_w(w_step, space.w_float):
- raise OperationError(space.w_TypeError,
- space.wrap("range() integer step argument expected, got float."))
+ raise oefmt(space.w_TypeError,
+ "range() integer step argument expected, got float.")
w_start = space.int(w_start)
w_stop = space.int(w_stop)
@@ -112,8 +110,7 @@
step = st = space.bigint_w(w_step)
if not step.tobool():
- raise OperationError(space.w_ValueError,
- space.wrap("step argument must not be zero"))
+ raise oefmt(space.w_ValueError, "step argument must not be zero")
elif step.sign < 0:
lo, hi, st = hi, lo, st.neg()
@@ -123,8 +120,7 @@
try:
howmany = n.toint()
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("result has too many items"))
+ raise oefmt(space.w_OverflowError, "result has too many items")
else:
howmany = 0
@@ -155,16 +151,18 @@
elif len(args_w):
w_sequence = args_w[0]
else:
- msg = "%s() expects at least one argument" % (implementation_of,)
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "%s() expects at least one argument",
+ implementation_of)
w_key = None
kwds = args.keywords
if kwds:
if kwds[0] == "key" and len(kwds) == 1:
w_key = args.keywords_w[0]
else:
- msg = "%s() got unexpected keyword argument" % (implementation_of,)
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "%s() got unexpected keyword argument",
+ implementation_of)
w_iter = space.iter(w_sequence)
w_type = space.type(w_iter)
@@ -191,8 +189,7 @@
w_max_item = w_item
w_max_val = w_compare_with
if w_max_item is None:
- msg = "arg is an empty sequence"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError, "arg is an empty sequence")
return w_max_item
if unroll:
min_max_impl = jit.unroll_safe(min_max_impl)
@@ -341,8 +338,8 @@
def __init__(self, space, w_sequence):
self.remaining = space.len_w(w_sequence) - 1
if space.lookup(w_sequence, "__getitem__") is None:
- msg = "reversed() argument must be a sequence"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "reversed() argument must be a sequence")
self.w_sequence = w_sequence
def descr___iter__(self, space):
@@ -439,8 +436,7 @@
i += len
if 0 <= i < len:
return space.wrap(self.start + i * self.step)
- raise OperationError(space.w_IndexError,
- space.wrap("xrange object index out of range"))
+ raise oefmt(space.w_IndexError, "xrange object index out of range")
def descr_iter(self):
if self.promote_step and self.step == 1:
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -32,8 +32,7 @@
if space.is_true(space.callable(w_metaclass)):
return space.call_function(w_metaclass, w_name,
w_bases, w_dict)
- raise OperationError(space.w_TypeError,
- space.wrap("base must be class"))
+ raise oefmt(space.w_TypeError, "base must be class")
return W_ClassObject(space, w_name, bases_w, w_dict)
@@ -58,28 +57,23 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__dict__ must be a dictionary object"))
+ raise oefmt(space.w_TypeError,
+ "__dict__ must be a dictionary object")
self.w_dict = w_dict
def setname(self, space, w_newname):
if not space.isinstance_w(w_newname, space.w_str):
- raise OperationError(space.w_TypeError,
- space.wrap("__name__ must be a string object")
- )
+ raise oefmt(space.w_TypeError, "__name__ must be a string object")
self.name = space.str_w(w_newname)
def setbases(self, space, w_bases):
if not space.isinstance_w(w_bases, space.w_tuple):
- raise OperationError(space.w_TypeError,
- space.wrap("__bases__ must be a tuple object")
- )
+ raise oefmt(space.w_TypeError, "__bases__ must be a tuple object")
bases_w = space.fixedview(w_bases)
for w_base in bases_w:
if not isinstance(w_base, W_ClassObject):
- raise OperationError(space.w_TypeError,
- space.wrap("__bases__ items must be classes"))
+ raise oefmt(space.w_TypeError,
+ "__bases__ items must be classes")
self.bases_w = bases_w
def is_subclass_of(self, other):
@@ -207,13 +201,9 @@
if w_init is not None:
w_result = space.call_args(w_init, __args__)
if not space.is_w(w_result, space.w_None):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__init__() should return None"))
+ raise oefmt(space.w_TypeError, "__init__() should return None")
elif __args__.arguments_w or __args__.keywords:
- raise OperationError(
- space.w_TypeError,
- space.wrap("this constructor takes no arguments"))
+ raise oefmt(space.w_TypeError, "this constructor takes no arguments")
return w_inst
W_ClassObject.typedef = TypeDef("classobj",
@@ -297,9 +287,7 @@
def descr_instance_new(space, w_type, w_class, w_dict=None):
# w_type is not used at all
if not isinstance(w_class, W_ClassObject):
- raise OperationError(
- space.w_TypeError,
- space.wrap("instance() first arg must be class"))
+ raise oefmt(space.w_TypeError, "instance() first arg must be class")
w_result = w_class.instantiate(space)
if not space.is_none(w_dict):
w_result.setdict(space, w_dict)
@@ -318,9 +306,7 @@
def set_oldstyle_class(self, space, w_class):
if w_class is None or not isinstance(w_class, W_ClassObject):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__class__ must be set to a class"))
+ raise oefmt(space.w_TypeError, "__class__ must be set to a class")
self.w_class = w_class
def getattr_from_class(self, space, name):
@@ -453,13 +439,9 @@
w_result = space.call_function(w_meth)
if space.isinstance_w(w_result, space.w_int):
if space.is_true(space.lt(w_result, space.wrap(0))):
- raise OperationError(
- space.w_ValueError,
- space.wrap("__len__() should return >= 0"))
+ raise oefmt(space.w_ValueError, "__len__() should return >= 0")
return w_result
- raise OperationError(
- space.w_TypeError,
- space.wrap("__len__() should return an int"))
+ raise oefmt(space.w_TypeError, "__len__() should return an int")
def descr_getitem(self, space, w_key):
w_meth = self.getattr(space, '__getitem__')
@@ -479,9 +461,7 @@
return space.call_function(w_meth)
w_meth = self.getattr(space, '__getitem__', False)
if w_meth is None:
- raise OperationError(
- space.w_TypeError,
- space.wrap("iteration over non-sequence"))
+ raise oefmt(space.w_TypeError, "iteration over non-sequence")
return space.newseqiter(self)
#XXX do I really need a next method? the old implementation had one, but I
# don't see the point
@@ -521,13 +501,10 @@
w_result = space.call_function(w_func)
if space.isinstance_w(w_result, space.w_int):
if space.is_true(space.lt(w_result, space.wrap(0))):
- raise OperationError(
- space.w_ValueError,
- space.wrap("__nonzero__() should return >= 0"))
+ raise oefmt(space.w_ValueError,
+ "__nonzero__() should return >= 0")
return w_result
- raise OperationError(
- space.w_TypeError,
- space.wrap("__nonzero__() should return an int"))
+ raise oefmt(space.w_TypeError, "__nonzero__() should return an int")
def descr_cmp(self, space, w_other): # do all the work here like CPython
w_a, w_b = _coerce_helper(space, self, w_other)
@@ -544,9 +521,8 @@
res = space.int_w(w_res)
except OperationError, e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__cmp__ must return int"))
+ raise oefmt(space.w_TypeError,
+ "__cmp__ must return int")
raise
if res > 0:
return space.wrap(1)
@@ -563,9 +539,8 @@
res = space.int_w(w_res)
except OperationError, e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__cmp__ must return int"))
+ raise oefmt(space.w_TypeError,
+ "__cmp__ must return int")
raise
if res < 0:
return space.wrap(1)
@@ -580,16 +555,13 @@
w_eq = self.getattr(space, '__eq__', False)
w_cmp = self.getattr(space, '__cmp__', False)
if w_eq is not None or w_cmp is not None:
- raise OperationError(space.w_TypeError,
- space.wrap("unhashable instance"))
+ raise oefmt(space.w_TypeError, "unhashable instance")
else:
return space.wrap(compute_identity_hash(self))
w_ret = space.call_function(w_func)
if (not space.isinstance_w(w_ret, space.w_int) and
not space.isinstance_w(w_ret, space.w_long)):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__hash__ must return int or long"))
+ raise oefmt(space.w_TypeError, "__hash__ must return int or long")
return w_ret
def descr_int(self, space):
@@ -603,9 +575,7 @@
return space.int(w_truncated)
except OperationError:
# Raise a different error
- raise OperationError(
- space.w_TypeError,
- space.wrap("__trunc__ returned non-Integral"))
+ raise oefmt(space.w_TypeError, "__trunc__ returned non-Integral")
def descr_long(self, space):
w_func = self.getattr(space, '__long__', False)
@@ -617,9 +587,8 @@
w_func = self.getattr(space, '__index__', False)
if w_func is not None:
return space.call_function(w_func)
- raise OperationError(
- space.w_TypeError,
- space.wrap("object cannot be interpreted as an index"))
+ raise oefmt(space.w_TypeError,
+ "object cannot be interpreted as an index")
def descr_contains(self, space, w_obj):
w_func = self.getattr(space, '__contains__', False)
@@ -674,8 +643,7 @@
def descr_next(self, space):
w_func = self.getattr(space, 'next', False)
if w_func is None:
- raise OperationError(space.w_TypeError,
- space.wrap("instance has no next() method"))
+ raise oefmt(space.w_TypeError, "instance has no next() method")
return space.call_function(w_func)
def descr_del(self, space):
diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py
--- a/pypy/module/__builtin__/operation.py
+++ b/pypy/module/__builtin__/operation.py
@@ -3,7 +3,7 @@
"""
from pypy.interpreter import gateway
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from rpython.rlib.runicode import UNICHR
from rpython.rlib.rfloat import isnan, isinf, round_double
@@ -19,8 +19,7 @@
try:
char = __builtin__.chr(space.int_w(w_ascii))
except ValueError: # chr(out-of-range)
- raise OperationError(space.w_ValueError,
- space.wrap("character code not in range(256)"))
+ raise oefmt(space.w_ValueError, "character code not in range(256)")
return space.wrap(char)
@unwrap_spec(code=int)
@@ -30,8 +29,7 @@
try:
c = UNICHR(code)
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("unichr() arg out of range"))
+ raise oefmt(space.w_ValueError, "unichr() arg out of range")
return space.wrap(c)
def len(space, w_obj):
@@ -151,8 +149,8 @@
# finite x, and ndigits is not unreasonably large
z = round_double(number, ndigits)
if isinf(z):
- raise OperationError(space.w_OverflowError,
- space.wrap("rounded value too large to represent"))
+ raise oefmt(space.w_OverflowError,
+ "rounded value too large to represent")
return space.wrap(z)
# ____________________________________________________________
@@ -227,7 +225,7 @@
same value."""
if space.is_w(space.type(w_str), space.w_str):
return space.new_interned_w_str(w_str)
- raise OperationError(space.w_TypeError, space.wrap("intern() argument must be string."))
+ raise oefmt(space.w_TypeError, "intern() argument must be string.")
def callable(space, w_object):
"""Check whether the object appears to be callable (i.e., some kind of
diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py
--- a/pypy/module/__pypy__/interp_builders.py
+++ b/pypy/module/__pypy__/interp_builders.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef
from rpython.rlib.rstring import UnicodeBuilder, StringBuilder
@@ -16,8 +16,8 @@
def _check_done(self, space):
if self.builder is None:
- raise OperationError(space.w_ValueError, space.wrap(
- "Can't operate on a built builder"))
+ raise oefmt(space.w_ValueError,
+ "Can't operate on a built builder")
@unwrap_spec(size=int)
def descr__new__(space, w_subtype, size=-1):
@@ -32,8 +32,7 @@
def descr_append_slice(self, space, s, start, end):
self._check_done(space)
if not 0 <= start <= end <= len(s):
- raise OperationError(space.w_ValueError, space.wrap(
- "bad start/stop"))
+ raise oefmt(space.w_ValueError, "bad start/stop")
self.builder.append_slice(s, start, end)
def descr_build(self, space):
@@ -44,8 +43,7 @@
def descr_len(self, space):
if self.builder is None:
- raise OperationError(space.w_ValueError, space.wrap(
- "no length of built builder"))
+ raise oefmt(space.w_ValueError, "no length of built builder")
return space.wrap(self.builder.getlength())
W_Builder.__name__ = "W_%s" % name
diff --git a/pypy/module/__pypy__/interp_identitydict.py b/pypy/module/__pypy__/interp_identitydict.py
--- a/pypy/module/__pypy__/interp_identitydict.py
+++ b/pypy/module/__pypy__/interp_identitydict.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.baseobjspace import W_Root
@@ -35,9 +35,9 @@
raise OperationError(space.w_KeyError, w_key)
def descr_iter(self, space):
- raise OperationError(space.w_TypeError,
- space.wrap("'identity_dict' object does not support iteration; "
- "iterate over x.keys()"))
+ raise oefmt(space.w_TypeError,
+ "'identity_dict' object does not support iteration; "
+ "iterate over x.keys()")
def get(self, space, w_key, w_default=None):
if w_default is None:
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError, oefmt, wrap_oserror
+from pypy.interpreter.error import oefmt, wrap_oserror
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.pycode import CodeHookCache
from pypy.interpreter.pyframe import PyFrame
@@ -74,8 +74,8 @@
def lookup_special(space, w_obj, meth):
"""Lookup up a special method on an object."""
if space.is_oldstyle_instance(w_obj):
- w_msg = space.wrap("this doesn't do what you want on old-style classes")
- raise OperationError(space.w_TypeError, w_msg)
+ raise oefmt(space.w_TypeError,
+ "this doesn't do what you want on old-style classes")
w_descr = space.lookup(w_obj, meth)
if w_descr is None:
return space.w_None
@@ -97,8 +97,7 @@
elif isinstance(w_obj, W_BaseSetObject):
name = w_obj.strategy.__class__.__name__
else:
- raise OperationError(space.w_TypeError,
- space.wrap("expecting dict or list or set object"))
+ raise oefmt(space.w_TypeError, "expecting dict or list or set object")
return space.wrap(name)
@@ -119,8 +118,7 @@
@unwrap_spec(sizehint=int)
def resizelist_hint(space, w_iterable, sizehint):
if not isinstance(w_iterable, W_ListObject):
- raise OperationError(space.w_TypeError,
- space.wrap("arg 1 must be a 'list'"))
+ raise oefmt(space.w_TypeError, "arg 1 must be a 'list'")
w_iterable._resize_hint(sizehint)
@unwrap_spec(sizehint=int)
@@ -181,8 +179,7 @@
elif space.is_w(space.type(w_obj), space.w_str):
jit.promote_string(space.str_w(w_obj))
elif space.is_w(space.type(w_obj), space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- "promoting unicode unsupported"))
+ raise oefmt(space.w_TypeError, "promoting unicode unsupported")
else:
jit.promote(w_obj)
return w_obj
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -88,8 +88,7 @@
ctype = self.ctype
if not isinstance(ctype, W_CTypeFunc):
space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("expected a function ctype"))
+ raise oefmt(space.w_TypeError, "expected a function ctype")
return ctype
def hide_object(self):
@@ -219,8 +218,8 @@
invoke_callback,
unique_id)
if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK:
- raise OperationError(space.w_SystemError,
- space.wrap("libffi failed to build this callback"))
+ raise oefmt(space.w_SystemError,
+ "libffi failed to build this callback")
def py_invoke(self, ll_res, ll_args):
jitdriver1.jit_merge_point(callback=self,
@@ -234,9 +233,9 @@
space = fresult.space
if isinstance(fresult, W_CTypeVoid):
if not space.is_w(w_res, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("callback with the return type 'void'"
- " must return None"))
+ raise oefmt(space.w_TypeError,
+ "callback with the return type 'void' must return "
+ "None")
return
#
small_result = encode_result_for_libffi and fresult.size < SIZE_OF_FFI_ARG
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -113,8 +113,9 @@
if requires_ordering:
if (isinstance(self.ctype, W_CTypePrimitive) or
isinstance(w_other.ctype, W_CTypePrimitive)):
- raise OperationError(space.w_TypeError, space.wrap(
- "cannot do comparison on a primitive cdata"))
+ raise oefmt(space.w_TypeError,
+ "cannot do comparison on a primitive "
+ "cdata")
ptr1 = rffi.cast(lltype.Unsigned, ptr1)
ptr2 = rffi.cast(lltype.Unsigned, ptr2)
result = op(ptr1, ptr2)
@@ -175,22 +176,18 @@
space = self.space
#
if space.is_w(w_slice.w_start, space.w_None):
- raise OperationError(space.w_IndexError,
- space.wrap("slice start must be specified"))
+ raise oefmt(space.w_IndexError, "slice start must be specified")
start = space.int_w(w_slice.w_start)
#
if space.is_w(w_slice.w_stop, space.w_None):
- raise OperationError(space.w_IndexError,
- space.wrap("slice stop must be specified"))
+ raise oefmt(space.w_IndexError, "slice stop must be specified")
stop = space.int_w(w_slice.w_stop)
#
if not space.is_w(w_slice.w_step, space.w_None):
- raise OperationError(space.w_IndexError,
- space.wrap("slice with step not supported"))
+ raise oefmt(space.w_IndexError, "slice with step not supported")
#
if start > stop:
- raise OperationError(space.w_IndexError,
- space.wrap("slice start > stop"))
+ raise oefmt(space.w_IndexError, "slice start > stop")
#
ctype = self.ctype._check_slice_index(self, start, stop)
assert isinstance(ctype, W_CTypePointer)
diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py
--- a/pypy/module/_cffi_backend/ctypearray.py
+++ b/pypy/module/_cffi_backend/ctypearray.py
@@ -40,8 +40,8 @@
try:
datasize = ovfcheck(length * self.ctitem.size)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
else:
length = self.length
#
@@ -55,8 +55,7 @@
def _check_subscript_index(self, w_cdata, i):
space = self.space
if i < 0:
- raise OperationError(space.w_IndexError,
- space.wrap("negative index not supported"))
+ raise oefmt(space.w_IndexError, "negative index not supported")
if i >= w_cdata.get_array_length():
raise oefmt(space.w_IndexError,
"index too large for cdata '%s' (expected %d < %d)",
@@ -66,8 +65,7 @@
def _check_slice_index(self, w_cdata, start, stop):
space = self.space
if start < 0:
- raise OperationError(space.w_IndexError,
- space.wrap("negative index not supported"))
+ raise oefmt(space.w_IndexError, "negative index not supported")
if stop > w_cdata.get_array_length():
raise oefmt(space.w_IndexError,
"index too large (expected %d <= %d)",
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -471,5 +471,5 @@
# call libffi's ffi_prep_cif() function
res = jit_libffi.jit_ffi_prep_cif(rawmem)
if res != clibffi.FFI_OK:
- raise OperationError(space.w_SystemError,
- space.wrap("libffi failed to build this function type"))
+ raise oefmt(space.w_SystemError,
+ "libffi failed to build this function type")
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -185,26 +185,24 @@
except OperationError, e:
if not e.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_TypeError,
- space.wrap("field name or array index expected"))
+ raise oefmt(space.w_TypeError,
+ "field name or array index expected")
return self.typeoffsetof_index(index)
else:
return self.typeoffsetof_field(fieldname, following)
def typeoffsetof_field(self, fieldname, following):
- space = self.space
- msg = "with a field name argument, expected a struct or union ctype"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(self.space.w_TypeError,
+ "with a field name argument, expected a struct or union "
+ "ctype")
def typeoffsetof_index(self, index):
- space = self.space
- msg = "with an integer argument, expected an array or pointer ctype"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(self.space.w_TypeError,
+ "with an integer argument, expected an array or pointer "
+ "ctype")
def rawaddressof(self, cdata, offset):
- space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("expected a pointer ctype"))
+ raise oefmt(self.space.w_TypeError, "expected a pointer ctype")
def call(self, funcaddr, args_w):
space = self.space
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -289,8 +289,8 @@
try:
datasize = ovfcheck(length * itemsize)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
result = lltype.malloc(rffi.CCHARP.TO, datasize,
flavor='raw', zero=True)
try:
@@ -322,13 +322,12 @@
space = self.space
ctitem = self.ctitem
if ctitem.size < 0:
- raise OperationError(space.w_TypeError,
- space.wrap("pointer to opaque"))
+ raise oefmt(space.w_TypeError, "pointer to opaque")
try:
offset = ovfcheck(index * ctitem.size)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array offset would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array offset would overflow a ssize_t")
return ctitem, offset
def rawaddressof(self, cdata, offset):
@@ -341,9 +340,8 @@
ptr = rffi.ptradd(ptr, offset)
return cdataobj.W_CData(space, ptr, self)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("expected a cdata struct/union/array/pointer"
- " object"))
+ raise oefmt(space.w_TypeError,
+ "expected a cdata struct/union/array/pointer object")
def _fget(self, attrchar):
if attrchar == 'i': # item
@@ -377,8 +375,7 @@
if w_fileobj.cffi_fileobj is None:
fd = w_fileobj.direct_fileno()
if fd < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("file has no OS file descriptor"))
+ raise oefmt(space.w_ValueError, "file has no OS file descriptor")
try:
w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode)
except OSError, e:
diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py
--- a/pypy/module/_cffi_backend/ctypestruct.py
+++ b/pypy/module/_cffi_backend/ctypestruct.py
@@ -94,8 +94,7 @@
except KeyError:
raise OperationError(space.w_KeyError, space.wrap(fieldname))
if cfield.bitshift >= 0:
- raise OperationError(space.w_TypeError,
- space.wrap("not supported for bitfields"))
+ raise oefmt(space.w_TypeError, "not supported for bitfields")
return (cfield.ctype, cfield.offset)
def _copy_from_same(self, cdata, w_ob):
@@ -243,8 +242,8 @@
varsize = ovfcheck(itemsize * varsizelength)
size = ovfcheck(self.offset + varsize)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
assert size >= 0
return max(size, optvarsize)
# if 'value' was only an integer, get_new_array_length() returns
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -44,8 +44,7 @@
raise oefmt(space.w_ValueError,
"ctype '%s' is of unknown size", w_obj.name)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("expected a 'cdata' or 'ctype' object"))
+ raise oefmt(space.w_TypeError, "expected a 'cdata' or 'ctype' object")
return space.wrap(size)
@unwrap_spec(w_ctype=ctypeobj.W_CType)
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -1,6 +1,6 @@
from __future__ import with_statement
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib import jit
from rpython.rlib.objectmodel import specialize
@@ -285,8 +285,7 @@
try:
return _standard_object_as_bool(space, w_io)
except _NotStandardObject:
- raise OperationError(space.w_TypeError,
- space.wrap("integer/float expected"))
+ raise oefmt(space.w_TypeError, "integer/float expected")
# ____________________________________________________________
@@ -300,8 +299,7 @@
else:
explicitlength = space.getindex_w(w_value, space.w_OverflowError)
if explicitlength < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("negative array length"))
+ raise oefmt(space.w_ValueError, "negative array length")
return (space.w_None, explicitlength)
# ____________________________________________________________
diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
--- a/pypy/module/_cffi_backend/newtype.py
+++ b/pypy/module/_cffi_backend/newtype.py
@@ -181,16 +181,14 @@
else:
length = space.getindex_w(w_length, space.w_OverflowError)
if length < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("negative array length"))
+ raise oefmt(space.w_ValueError, "negative array length")
return _new_array_type(space, w_ctptr, length)
@jit.elidable
def _new_array_type(space, w_ctptr, length):
_setup_wref(rweakref.has_weakref_support())
if not isinstance(w_ctptr, ctypeptr.W_CTypePointer):
- raise OperationError(space.w_TypeError,
- space.wrap("first arg must be a pointer ctype"))
+ raise oefmt(space.w_TypeError, "first arg must be a pointer ctype")
arrays = w_ctptr._array_types
if arrays is None:
arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray)
@@ -212,8 +210,8 @@
try:
arraysize = ovfcheck(length * ctitem.size)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
extra = '[%d]' % length
#
ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra)
@@ -290,9 +288,9 @@
sflags = complete_sflags(sflags)
if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion)
or w_ctype.size >= 0):
- raise OperationError(space.w_TypeError,
- space.wrap("first arg must be a non-initialized"
- " struct or union ctype"))
+ raise oefmt(space.w_TypeError,
+ "first arg must be a non-initialized struct or union "
+ "ctype")
is_union = isinstance(w_ctype, ctypestruct.W_CTypeUnion)
alignment = 1
@@ -310,8 +308,7 @@
w_field = fields_w[i]
field_w = space.fixedview(w_field)
if not (2 <= len(field_w) <= 4):
- raise OperationError(space.w_TypeError,
- space.wrap("bad field descr"))
+ raise oefmt(space.w_TypeError, "bad field descr")
fname = space.str_w(field_w[0])
ftype = space.interp_w(ctypeobj.W_CType, field_w[1])
fbitsize = -1
@@ -564,14 +561,13 @@
enumerators_w = space.fixedview(w_enumerators)
enumvalues_w = space.fixedview(w_enumvalues)
if len(enumerators_w) != len(enumvalues_w):
- raise OperationError(space.w_ValueError,
- space.wrap("tuple args must have the same size"))
+ raise oefmt(space.w_ValueError, "tuple args must have the same size")
enumerators = [space.str_w(w) for w in enumerators_w]
#
if (not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveSigned) and
not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveUnsigned)):
- raise OperationError(space.w_TypeError,
- space.wrap("expected a primitive signed or unsigned base type"))
+ raise oefmt(space.w_TypeError,
+ "expected a primitive signed or unsigned base type")
#
lvalue = lltype.malloc(rffi.CCHARP.TO, w_basectype.size, flavor='raw')
try:
@@ -601,8 +597,8 @@
fargs = []
for w_farg in space.fixedview(w_fargs):
if not isinstance(w_farg, ctypeobj.W_CType):
- raise OperationError(space.w_TypeError,
- space.wrap("first arg must be a tuple of ctype objects"))
+ raise oefmt(space.w_TypeError,
+ "first arg must be a tuple of ctype objects")
if isinstance(w_farg, ctypearray.W_CTypeArray):
w_farg = w_farg.ctptr
fargs.append(w_farg)
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -119,9 +119,7 @@
if space.is_true(space.callable(w_search_function)):
state.codec_search_path.append(w_search_function)
else:
- raise OperationError(
- space.w_TypeError,
- space.wrap("argument must be callable"))
+ raise oefmt(space.w_TypeError, "argument must be callable")
@unwrap_spec(encoding=str)
@@ -148,19 +146,17 @@
space.call_function(w_import, space.wrap("encodings"))
state.codec_need_encodings = False
if len(state.codec_search_path) == 0:
- raise OperationError(
- space.w_LookupError,
- space.wrap("no codec search functions registered: "
- "can't find encoding"))
+ raise oefmt(space.w_LookupError,
+ "no codec search functions registered: can't find "
+ "encoding")
for w_search in state.codec_search_path:
w_result = space.call_function(w_search,
space.wrap(normalized_encoding))
if not space.is_w(w_result, space.w_None):
if not (space.isinstance_w(w_result, space.w_tuple) and
space.len_w(w_result) == 4):
- raise OperationError(
- space.w_TypeError,
- space.wrap("codec search functions must return 4-tuples"))
+ raise oefmt(space.w_TypeError,
+ "codec search functions must return 4-tuples")
else:
state.codec_search_cache[normalized_encoding] = w_result
state.modified()
@@ -178,22 +174,19 @@
except OperationError, e:
if not e.match(space, space.w_AttributeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- "wrong exception"))
+ raise oefmt(space.w_TypeError, "wrong exception")
delta = space.int_w(w_end) - space.int_w(w_start)
if delta < 0 or not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- "wrong exception"))
+ raise oefmt(space.w_TypeError, "wrong exception")
def strict_errors(space, w_exc):
check_exception(space, w_exc)
if space.isinstance_w(w_exc, space.w_BaseException):
raise OperationError(space.type(w_exc), w_exc)
else:
- raise OperationError(space.w_TypeError, space.wrap(
- "codec must pass exception instance"))
+ raise oefmt(space.w_TypeError, "codec must pass exception instance")
def ignore_errors(space, w_exc):
check_exception(space, w_exc)
@@ -350,9 +343,8 @@
if space.is_true(w_decoder):
w_res = space.call_function(w_decoder, w_obj, space.wrap(errors))
if (not space.isinstance_w(w_res, space.w_tuple) or space.len_w(w_res) != 2):
- raise OperationError(
- space.w_TypeError,
- space.wrap("encoder must return a tuple (object, integer)"))
+ raise oefmt(space.w_TypeError,
+ "encoder must return a tuple (object, integer)")
return space.getitem(w_res, space.wrap(0))
else:
assert 0, "XXX, what to do here?"
@@ -371,9 +363,7 @@
if space.is_true(space.callable(w_handler)):
state.codec_error_registry[errors] = w_handler
else:
- raise OperationError(
- space.w_TypeError,
- space.wrap("handler must be callable"))
+ raise oefmt(space.w_TypeError, "handler must be callable")
# ____________________________________________________________
# delegation to runicode
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -4,7 +4,7 @@
from pypy.interpreter.typedef import TypeDef, make_weakref_descr
from pypy.interpreter.typedef import GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib.debug import check_nonneg
@@ -76,9 +76,8 @@
def checklock(self, lock):
if lock is not self.lock:
- raise OperationError(
- self.space.w_RuntimeError,
- self.space.wrap("deque mutated during iteration"))
+ raise oefmt(self.space.w_RuntimeError,
+ "deque mutated during iteration")
def init(self, w_iterable=None, w_maxlen=None):
space = self.space
@@ -200,8 +199,7 @@
def pop(self):
"Remove and return the rightmost element."
if self.len == 0:
- msg = "pop from an empty deque"
- raise OperationError(self.space.w_IndexError, self.space.wrap(msg))
+ raise oefmt(self.space.w_IndexError, "pop from an empty deque")
self.len -= 1
ri = self.rightindex
w_obj = self.rightblock.data[ri]
@@ -224,8 +222,7 @@
def popleft(self):
"Remove and return the leftmost element."
if self.len == 0:
- msg = "pop from an empty deque"
- raise OperationError(self.space.w_IndexError, self.space.wrap(msg))
+ raise oefmt(self.space.w_IndexError, "pop from an empty deque")
self.len -= 1
li = self.leftindex
w_obj = self.leftblock.data[li]
@@ -263,8 +260,7 @@
if index >= BLOCKLEN:
block = block.rightlink
index = 0
- raise OperationError(space.w_ValueError,
- space.wrap("deque.remove(x): x not in deque"))
+ raise oefmt(space.w_ValueError, "deque.remove(x): x not in deque")
def reverse(self):
"Reverse *IN PLACE*."
@@ -371,8 +367,7 @@
b, i = self.locate(start)
return b.data[i]
else:
- raise OperationError(space.w_TypeError,
- space.wrap("deque[:] is not supported"))
+ raise oefmt(space.w_TypeError, "deque[:] is not supported")
def setitem(self, w_index, w_newobj):
space = self.space
@@ -381,8 +376,7 @@
b, i = self.locate(start)
b.data[i] = w_newobj
else:
- raise OperationError(space.w_TypeError,
- space.wrap("deque[:] is not supported"))
+ raise oefmt(space.w_TypeError, "deque[:] is not supported")
def delitem(self, w_index):
space = self.space
@@ -390,8 +384,7 @@
if step == 0: # index only
self.del_item(start)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("deque[:] is not supported"))
+ raise oefmt(space.w_TypeError, "deque[:] is not supported")
def copy(self):
"Return a shallow copy of a deque."
@@ -520,13 +513,12 @@
return self.space.wrap(self.counter)
def next(self):
+ space = self.space
if self.lock is not self.deque.lock:
self.counter = 0
- raise OperationError(
- self.space.w_RuntimeError,
- self.space.wrap("deque mutated during iteration"))
+ raise oefmt(space.w_RuntimeError, "deque mutated during iteration")
if self.counter == 0:
- raise OperationError(self.space.w_StopIteration, self.space.w_None)
+ raise OperationError(space.w_StopIteration, space.w_None)
self.counter -= 1
ri = self.index
w_x = self.block.data[ri]
@@ -563,13 +555,12 @@
return self.space.wrap(self.counter)
def next(self):
+ space = self.space
if self.lock is not self.deque.lock:
self.counter = 0
- raise OperationError(
- self.space.w_RuntimeError,
- self.space.wrap("deque mutated during iteration"))
+ raise oefmt(space.w_RuntimeError, "deque mutated during iteration")
if self.counter == 0:
- raise OperationError(self.space.w_StopIteration, self.space.w_None)
+ raise OperationError(space.w_StopIteration, space.w_None)
self.counter -= 1
ri = self.index
w_x = self.block.data[ri]
diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py
--- a/pypy/module/_csv/interp_csv.py
+++ b/pypy/module/_csv/interp_csv.py
@@ -106,18 +106,17 @@
# validate options
if not (0 <= tmp_quoting < 4):
- raise OperationError(space.w_TypeError,
- space.wrap('bad "quoting" value'))
+ raise oefmt(space.w_TypeError, 'bad "quoting" value')
if dialect.delimiter == '\0':
- raise OperationError(space.w_TypeError,
- space.wrap('"delimiter" must be a 1-character string'))
+ raise oefmt(space.w_TypeError,
+ '"delimiter" must be a 1-character string')
if space.is_w(w_quotechar, space.w_None) and w_quoting is None:
tmp_quoting = QUOTE_NONE
if tmp_quoting != QUOTE_NONE and dialect.quotechar == '\0':
- raise OperationError(space.w_TypeError,
- space.wrap('quotechar must be set if quoting enabled'))
+ raise oefmt(space.w_TypeError,
+ "quotechar must be set if quoting enabled")
dialect.quoting = tmp_quoting
return dialect
diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py
--- a/pypy/module/_csv/interp_reader.py
+++ b/pypy/module/_csv/interp_reader.py
@@ -1,6 +1,6 @@
from rpython.rlib.rstring import StringBuilder
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.typedef import TypeDef, interp2app
from pypy.interpreter.typedef import interp_attrproperty_w, interp_attrproperty
@@ -27,10 +27,9 @@
def error(self, msg):
space = self.space
- msg = 'line %d: %s' % (self.line_num, msg)
w_module = space.getbuiltinmodule('_csv')
w_error = space.getattr(w_module, space.wrap('Error'))
- raise OperationError(w_error, space.wrap(msg))
+ raise oefmt(w_error, "line %d: %s", self.line_num, msg)
error._dont_inline_ = True
def add_char(self, field_builder, c):
diff --git a/pypy/module/_demo/demo.py b/pypy/module/_demo/demo.py
--- a/pypy/module/_demo/demo.py
+++ b/pypy/module/_demo/demo.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty
@@ -22,8 +22,7 @@
def measuretime(space, repetitions, w_callable):
if repetitions <= 0:
w_DemoError = get(space, 'DemoError')
- msg = "repetition count must be > 0"
- raise OperationError(w_DemoError, space.wrap(msg))
+ raise oefmt(w_DemoError, "repetition count must be > 0")
starttime = time(0)
for i in range(repetitions):
space.call_function(w_callable)
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -94,19 +94,16 @@
def check_closed(self):
if self.stream is None:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("I/O operation on closed file")
- )
+ raise oefmt(self.space.w_ValueError,
+ "I/O operation on closed file")
def check_readable(self):
if not self.readable:
- raise OperationError(self.space.w_IOError, self.space.wrap(
- "File not open for reading"))
+ raise oefmt(self.space.w_IOError, "File not open for reading")
def check_writable(self):
if not self.writable:
- raise OperationError(self.space.w_IOError, self.space.wrap(
- "File not open for writing"))
+ raise oefmt(self.space.w_IOError, "File not open for writing")
def getstream(self):
"""Return self.stream or raise an app-level ValueError if missing
@@ -512,8 +509,9 @@
else:
line = w_line.charbuf_w(space)
except BufferInterfaceNotFound:
- raise OperationError(space.w_TypeError, space.wrap(
- "writelines() argument must be a sequence of strings"))
+ raise oefmt(space.w_TypeError,
+ "writelines() argument must be a sequence of "
+ "strings")
else:
lines[i] = space.wrap(line)
for w_line in lines:
diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py
--- a/pypy/module/_file/interp_stream.py
+++ b/pypy/module/_file/interp_stream.py
@@ -3,7 +3,7 @@
from rpython.rlib import streamio
from rpython.rlib.streamio import StreamErrors
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.baseobjspace import ObjSpace, W_Root, CannotHaveLock
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
@@ -58,14 +58,12 @@
def lock(self):
if not self._try_acquire_lock():
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("stream lock already held"))
+ raise oefmt(self.space.w_RuntimeError, "stream lock already held")
def unlock(self):
me = self.space.getexecutioncontext() # used as thread ident
if self.slockowner is not me:
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("stream lock is not held"))
+ raise oefmt(self.space.w_RuntimeError, "stream lock is not held")
self._release_lock()
def _cleanup_(self):
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -7,7 +7,7 @@
from rpython.tool.sourcetools import func_renamer
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, interp2app
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.module.thread.os_lock import Lock
@@ -85,8 +85,7 @@
def digest_type_by_name(self, space):
digest_type = ropenssl.EVP_get_digestbyname(self.name)
if not digest_type:
- raise OperationError(space.w_ValueError,
- space.wrap("unknown hash function"))
+ raise oefmt(space.w_ValueError, "unknown hash function")
return digest_type
def descr_repr(self, space):
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -42,8 +42,7 @@
## self.lock.free()
self.lock = space.allocate_lock()
self.owner = 0
- self.operr = OperationError(space.w_RuntimeError,
- space.wrap("reentrant call"))
+ self.operr = oefmt(space.w_RuntimeError, "reentrant call")
def __enter__(self):
if not self.lock.acquire(False):
@@ -91,8 +90,7 @@
w_data = space.call_method(self, "read", space.wrap(length))
if not space.isinstance_w(w_data, space.w_str):
- raise OperationError(space.w_TypeError, space.wrap(
- "read() should return bytes"))
+ raise oefmt(space.w_TypeError, "read() should return bytes")
data = space.str_w(w_data)
rwbuffer.setslice(0, data)
return space.wrap(len(data))
@@ -157,8 +155,8 @@
def _init(self, space):
if self.buffer_size <= 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "buffer size must be strictly positive"))
+ raise oefmt(space.w_ValueError,
+ "buffer size must be strictly positive")
self.buffer = ['\0'] * self.buffer_size
@@ -171,11 +169,10 @@
def _check_init(self, space):
if self.state == STATE_ZERO:
- raise OperationError(space.w_ValueError, space.wrap(
- "I/O operation on uninitialized object"))
+ raise oefmt(space.w_ValueError,
+ "I/O operation on uninitialized object")
elif self.state == STATE_DETACHED:
- raise OperationError(space.w_ValueError, space.wrap(
- "raw stream has been detached"))
+ raise oefmt(space.w_ValueError, "raw stream has been detached")
def _check_closed(self, space, message=None):
self._check_init(space)
@@ -185,8 +182,8 @@
w_pos = space.call_method(self.w_raw, "tell")
pos = space.r_longlong_w(w_pos)
if pos < 0:
- raise OperationError(space.w_IOError, space.wrap(
- "raw stream returned invalid position"))
+ raise oefmt(space.w_IOError,
+ "raw stream returned invalid position")
self.abs_pos = pos
return pos
@@ -297,8 +294,8 @@
space.wrap(pos), space.wrap(whence))
pos = space.r_longlong_w(w_pos)
if pos < 0:
- raise OperationError(space.w_IOError, space.wrap(
- "Raw stream returned invalid position"))
+ raise oefmt(space.w_IOError,
+ "Raw stream returned invalid position")
self.abs_pos = pos
return pos
@@ -363,8 +360,7 @@
written = space.getindex_w(w_written, space.w_IOError)
if not 0 <= written <= len(data):
- raise OperationError(space.w_IOError, space.wrap(
- "raw write() returned invalid length"))
+ raise oefmt(space.w_IOError, "raw write() returned invalid length")
if self.abs_pos != -1:
self.abs_pos += written
return written
@@ -417,8 +413,8 @@
with self.lock:
res = self._read_generic(space, size)
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "read length must be positive or -1"))
+ raise oefmt(space.w_ValueError,
+ "read length must be positive or -1")
return space.wrap(res)
@unwrap_spec(size=int)
@@ -454,8 +450,7 @@
self._check_closed(space, "read of closed file")
if size < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "read length must be positive"))
+ raise oefmt(space.w_ValueError, "read length must be positive")
if size == 0:
return space.wrap("")
@@ -537,9 +532,9 @@
raise BlockingIOError()
size = space.int_w(w_size)
if size < 0 or size > length:
- raise OperationError(space.w_IOError, space.wrap(
- "raw readinto() returned invalid length %d "
- "(should have been between 0 and %d)" % (size, length)))
+ raise oefmt(space.w_IOError,
+ "raw readinto() returned invalid length %d (should "
+ "have been between 0 and %d)", size, length)
if self.abs_pos != -1:
self.abs_pos += size
return size
diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py
--- a/pypy/module/_io/interp_bytesio.py
+++ b/pypy/module/_io/interp_bytesio.py
@@ -70,8 +70,7 @@
size = space.r_longlong_w(w_size)
if size < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative size value"))
+ raise oefmt(space.w_ValueError, "negative size value")
self.truncate(size)
if size == pos:
@@ -94,16 +93,13 @@
if whence == 0:
if pos < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative seek value"))
+ raise oefmt(space.w_ValueError, "negative seek value")
elif whence == 1:
if pos > sys.maxint - self.tell():
- raise OperationError(space.w_OverflowError, space.wrap(
- "new position too large"))
+ raise oefmt(space.w_OverflowError, "new position too large")
elif whence == 2:
if pos > sys.maxint - self.getsize():
- raise OperationError(space.w_OverflowError, space.wrap(
- "new position too large"))
+ raise oefmt(space.w_OverflowError, "new position too large")
else:
raise oefmt(space.w_ValueError,
"whence must be between 0 and 2, not %d", whence)
@@ -148,8 +144,8 @@
self.write_w(space, w_content)
pos = space.int_w(w_pos)
if pos < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "position value cannot be negative"))
+ raise oefmt(space.w_ValueError,
+ "position value cannot be negative")
self.seek(pos)
if not space.is_w(w_dict, space.w_None):
space.call_method(self.getdict(space), "update", w_dict)
diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py
--- a/pypy/module/_io/interp_fileio.py
+++ b/pypy/module/_io/interp_fileio.py
@@ -1,6 +1,7 @@
from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec
-from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2
+from pypy.interpreter.error import (
+ OperationError, oefmt, wrap_oserror, wrap_oserror2)
from rpython.rlib.rarithmetic import r_longlong
from rpython.rlib.rstring import StringBuilder
from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC
@@ -12,8 +13,7 @@
def fget(space, obj):
w_value = getattr(obj, name)
if w_value is None:
- raise OperationError(space.w_AttributeError,
- space.wrap(name))
+ raise OperationError(space.w_AttributeError, space.wrap(name))
else:
return w_value
def fset(space, obj, w_value):
@@ -21,8 +21,7 @@
def fdel(space, obj):
w_value = getattr(obj, name)
if w_value is None:
- raise OperationError(space.w_AttributeError,
- space.wrap(name))
+ raise OperationError(space.w_AttributeError, space.wrap(name))
setattr(obj, name, None)
return GetSetProperty(fget, fset, fdel, cls=cls, doc=doc)
@@ -32,8 +31,8 @@
O_APPEND = getattr(os, "O_APPEND", 0)
def _bad_mode(space):
- raise OperationError(space.w_ValueError, space.wrap(
- "Must have exactly one of read/write/append mode"))
+ raise oefmt(space.w_ValueError,
+ "Must have exactly one of read/write/append mode")
def decode_mode(space, mode):
flags = 0
@@ -70,8 +69,7 @@
readable = writable = True
plus = True
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid mode: %s" % (mode,)))
+ raise oefmt(space.w_ValueError, "invalid mode: %s", mode)
if not rwa:
_bad_mode(space)
@@ -133,8 +131,8 @@
@unwrap_spec(mode=str, closefd=int)
def descr_init(self, space, w_name, mode='r', closefd=True):
if space.isinstance_w(w_name, space.w_float):
- raise OperationError(space.w_TypeError, space.wrap(
- "integer argument expected, got float"))
+ raise oefmt(space.w_TypeError,
+ "integer argument expected, got float")
fd = -1
try:
@@ -143,8 +141,7 @@
pass
else:
if fd < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative file descriptor"))
+ raise oefmt(space.w_ValueError, "negative file descriptor")
self.readable, self.writable, self.appending, flags = decode_mode(space, mode)
@@ -162,8 +159,8 @@
else:
self.closefd = True
if not closefd:
- raise OperationError(space.w_ValueError, space.wrap(
- "Cannot use closefd=False with file name"))
+ raise oefmt(space.w_ValueError,
+ "Cannot use closefd=False with file name")
from pypy.module.posix.interp_posix import (
dispatch_filename, rposix)
@@ -219,15 +216,11 @@
def _check_readable(self, space):
if not self.readable:
- raise OperationError(
- space.w_ValueError,
- space.wrap("file not open for reading"))
+ raise oefmt(space.w_ValueError, "file not open for reading")
def _check_writable(self, space):
if not self.writable:
- raise OperationError(
- space.w_ValueError,
- space.wrap("file not open for writing"))
+ raise oefmt(space.w_ValueError, "file not open for writing")
def _close(self, space):
if self.fd < 0:
diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py
--- a/pypy/module/_io/interp_io.py
+++ b/pypy/module/_io/interp_io.py
@@ -89,25 +89,19 @@
rawmode += "+"
if universal and (writing or appending):
- raise OperationError(space.w_ValueError,
- space.wrap("can't use U and writing mode at once")
- )
+ raise oefmt(space.w_ValueError, "can't use U and writing mode at once")
if text and binary:
- raise OperationError(space.w_ValueError,
- space.wrap("can't have text and binary mode at once")
- )
+ raise oefmt(space.w_ValueError,
+ "can't have text and binary mode at once")
if reading + writing + appending > 1:
- raise OperationError(space.w_ValueError,
- space.wrap("must have exactly one of read/write/append mode")
- )
+ raise oefmt(space.w_ValueError,
+ "must have exactly one of read/write/append mode")
if binary and encoding is not None:
- raise OperationError(space.w_ValueError,
- space.wrap("binary mode doesn't take an encoding argument")
- )
+ raise oefmt(space.w_ValueError,
+ "binary mode doesn't take an encoding argument")
if binary and newline is not None:
- raise OperationError(space.w_ValueError,
- space.wrap("binary mode doesn't take a newline argument")
- )
+ raise oefmt(space.w_ValueError,
+ "binary mode doesn't take a newline argument")
w_raw = space.call_function(
space.gettypefor(W_FileIO), w_file, space.wrap(rawmode), space.wrap(closefd)
)
@@ -132,15 +126,11 @@
buffering = st.st_blksize
if buffering < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid buffering size")
- )
+ raise oefmt(space.w_ValueError, "invalid buffering size")
if buffering == 0:
if not binary:
- raise OperationError(space.w_ValueError,
- space.wrap("can't have unbuffered text I/O")
- )
+ raise oefmt(space.w_ValueError, "can't have unbuffered text I/O")
return w_raw
if updating:
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -36,23 +36,17 @@
# May be called with any object
def check_readable_w(space, w_obj):
if not space.is_true(space.call_method(w_obj, 'readable')):
- raise OperationError(
- space.w_IOError,
- space.wrap("file or stream is not readable"))
+ raise oefmt(space.w_IOError, "file or stream is not readable")
# May be called with any object
def check_writable_w(space, w_obj):
if not space.is_true(space.call_method(w_obj, 'writable')):
- raise OperationError(
- space.w_IOError,
- space.wrap("file or stream is not writable"))
+ raise oefmt(space.w_IOError, "file or stream is not writable")
# May be called with any object
def check_seekable_w(space, w_obj):
if not space.is_true(space.call_method(w_obj, 'seekable')):
- raise OperationError(
- space.w_IOError,
- space.wrap("file or stream is not seekable"))
+ raise oefmt(space.w_IOError, "file or stream is not seekable")
class W_IOBase(W_Root):
@@ -129,9 +123,7 @@
def flush_w(self, space):
if self._CLOSED():
- raise OperationError(
- space.w_ValueError,
- space.wrap("I/O operation on closed file"))
+ raise oefmt(space.w_ValueError, "I/O operation on closed file")
def seek_w(self, space, w_offset, w_whence=None):
self._unsupportedoperation(space, "seek")
@@ -349,8 +341,7 @@
break
if not space.isinstance_w(w_data, space.w_str):
- raise OperationError(space.w_TypeError, space.wrap(
- "read() should return bytes"))
+ raise oefmt(space.w_TypeError, "read() should return bytes")
data = space.str_w(w_data)
if not data:
break
diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py
--- a/pypy/module/_io/interp_stringio.py
+++ b/pypy/module/_io/interp_stringio.py
@@ -89,9 +89,8 @@
self.buf = list(initval)
pos = space.getindex_w(w_pos, space.w_TypeError)
if pos < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("position value cannot be negative")
- )
+ raise oefmt(space.w_ValueError,
+ "position value cannot be negative")
self.pos = pos
if not space.is_w(w_dict, space.w_None):
if not space.isinstance_w(w_dict, space.w_dict):
@@ -203,9 +202,7 @@
elif mode == 0 and pos < 0:
raise oefmt(space.w_ValueError, "negative seek position: %d", pos)
elif mode != 0 and pos != 0:
- raise OperationError(space.w_IOError,
- space.wrap("Can't do nonzero cur-relative seeks")
- )
+ raise oefmt(space.w_IOError, "Can't do nonzero cur-relative seeks")
# XXX: this makes almost no sense, but its how CPython does it.
if mode == 1:
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -59,8 +59,8 @@
@unwrap_spec(final=int)
def decode_w(self, space, w_input, final=False):
if self.w_decoder is None:
- raise OperationError(space.w_ValueError, space.wrap(
- "IncrementalNewlineDecoder.__init__ not called"))
+ raise oefmt(space.w_ValueError,
+ "IncrementalNewlineDecoder.__init__ not called")
# decode input (with the eventual \r from a previous pass)
if not space.is_w(self.w_decoder, space.w_None):
@@ -70,8 +70,8 @@
w_output = w_input
if not space.isinstance_w(w_output, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- "decoder should return a string result"))
+ raise oefmt(space.w_TypeError,
+ "decoder should return a string result")
output = space.unicode_w(w_output)
output_len = len(output)
@@ -287,8 +287,7 @@
if space.isinstance_w(w_encoding, space.w_str):
return w_encoding
- raise OperationError(space.w_IOError, space.wrap(
- "could not determine default encoding"))
+ raise oefmt(space.w_IOError, "could not determine default encoding")
class PositionCookie(object):
def __init__(self, bigint):
@@ -377,8 +376,8 @@
newline = space.unicode_w(w_newline)
if newline and newline not in (u'\n', u'\r\n', u'\r'):
r = space.str_w(space.repr(w_newline))
- raise OperationError(space.w_ValueError, space.wrap(
- "illegal newline value: %s" % (r,)))
+ raise oefmt(space.w_ValueError,
+ "illegal newline value: %s", r)
self.line_buffering = line_buffering
@@ -429,13 +428,13 @@
def _check_init(self, space):
if self.state == STATE_ZERO:
- raise OperationError(space.w_ValueError, space.wrap(
- "I/O operation on uninitialized object"))
+ raise oefmt(space.w_ValueError,
+ "I/O operation on uninitialized object")
def _check_attached(self, space):
if self.state == STATE_DETACHED:
- raise OperationError(space.w_ValueError, space.wrap(
- "underlying buffer has been detached"))
+ raise oefmt(space.w_ValueError,
+ "underlying buffer has been detached")
self._check_init(space)
def _check_closed(self, space, message=None):
@@ -548,7 +547,7 @@
remain buffered in the decoder, yet to be converted."""
if not self.w_decoder:
- raise OperationError(space.w_IOError, space.wrap("not readable"))
+ raise oefmt(space.w_IOError, "not readable")
if self.telling:
# To prepare for tell(), we need to snapshot a point in the file
@@ -602,7 +601,7 @@
self._check_attached(space)
self._check_closed(space)
if not self.w_decoder:
- raise OperationError(space.w_IOError, space.wrap("not readable"))
+ raise oefmt(space.w_IOError, "not readable")
size = convert_size(space, w_size)
self._writeflush(space)
@@ -741,11 +740,11 @@
self._check_closed(space)
if not self.w_encoder:
- raise OperationError(space.w_IOError, space.wrap("not writable"))
+ raise oefmt(space.w_IOError, "not writable")
if not space.isinstance_w(w_text, space.w_unicode):
- msg = "unicode argument expected, got '%T'"
- raise oefmt(space.w_TypeError, msg, w_text)
+ raise oefmt(space.w_TypeError,
+ "unicode argument expected, got '%T'", w_text)
text = space.unicode_w(w_text)
textlen = len(text)
@@ -845,14 +844,13 @@
self._check_attached(space)
if not self.seekable:
- raise OperationError(space.w_IOError, space.wrap(
- "underlying stream is not seekable"))
+ raise oefmt(space.w_IOError, "underlying stream is not seekable")
if whence == 1:
# seek relative to current position
if not space.is_true(space.eq(w_pos, space.wrap(0))):
- raise OperationError(space.w_IOError, space.wrap(
- "can't do nonzero cur-relative seeks"))
+ raise oefmt(space.w_IOError,
+ "can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to sync the
# underlying buffer with the current position.
w_pos = space.call_method(self, "tell")
@@ -860,8 +858,8 @@
elif whence == 2:
# seek relative to end of file
if not space.is_true(space.eq(w_pos, space.wrap(0))):
- raise OperationError(space.w_IOError, space.wrap(
- "can't do nonzero end-relative seeks"))
+ raise oefmt(space.w_IOError,
+ "can't do nonzero end-relative seeks")
space.call_method(self, "flush")
self._set_decoded_chars(None)
self.snapshot = None
@@ -871,13 +869,14 @@
w_pos, space.wrap(whence))
elif whence != 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid whence (%d, should be 0, 1 or 2)" % (whence,)))
+ raise oefmt(space.w_ValueError,
+ "invalid whence (%d, should be 0, 1 or 2)",
+ whence)
if space.is_true(space.lt(w_pos, space.wrap(0))):
r = space.str_w(space.repr(w_pos))
- raise OperationError(space.w_ValueError, space.wrap(
- "negative seek position %s" % (r,)))
+ raise oefmt(space.w_ValueError,
+ "negative seek position %s", r)
space.call_method(self, "flush")
@@ -914,8 +913,8 @@
# Skip chars_to_skip of the decoded characters
if len(self.decoded_chars) < cookie.chars_to_skip:
- raise OperationError(space.w_IOError, space.wrap(
- "can't restore logical file position"))
+ raise oefmt(space.w_IOError,
+ "can't restore logical file position")
self.decoded_chars_used = cookie.chars_to_skip
else:
self.snapshot = PositionSnapshot(cookie.dec_flags, "")
@@ -930,12 +929,11 @@
self._check_closed(space)
if not self.seekable:
- raise OperationError(space.w_IOError, space.wrap(
- "underlying stream is not seekable"))
+ raise oefmt(space.w_IOError, "underlying stream is not seekable")
if not self.telling:
- raise OperationError(space.w_IOError, space.wrap(
- "telling position disabled by next() call"))
+ raise oefmt(space.w_IOError,
+ "telling position disabled by next() call")
self._writeflush(space)
space.call_method(self, "flush")
@@ -1008,8 +1006,8 @@
cookie.need_eof = 1
if chars_decoded < chars_to_skip:
- raise OperationError(space.w_IOError, space.wrap(
- "can't reconstruct logical file position"))
+ raise oefmt(space.w_IOError,
+ "can't reconstruct logical file position")
finally:
space.call_method(self.w_decoder, "setstate", w_saved_state)
@@ -1025,9 +1023,8 @@
self._check_attached(space)
size = space.int_w(w_size)
if size <= 0:
- raise OperationError(space.w_ValueError,
- space.wrap("a strictly positive integer is required")
- )
+ raise oefmt(space.w_ValueError,
+ "a strictly positive integer is required")
self.chunk_size = size
W_TextIOWrapper.typedef = TypeDef(
diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py
--- a/pypy/module/_locale/interp_locale.py
+++ b/pypy/module/_locale/interp_locale.py
@@ -1,7 +1,7 @@
from rpython.rlib import rposix
from rpython.rlib.rarithmetic import intmask
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from rpython.rlib import rlocale
@@ -186,8 +186,7 @@
try:
return space.wrap(rlocale.nl_langinfo(key))
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("unsupported langinfo constant"))
+ raise oefmt(space.w_ValueError, "unsupported langinfo constant")
#___________________________________________________________________
# HAVE_LIBINTL dependence
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py
--- a/pypy/module/_lsprof/interp_lsprof.py
+++ b/pypy/module/_lsprof/interp_lsprof.py
@@ -1,7 +1,7 @@
import py
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import Method, Function
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import (TypeDef, GetSetProperty,
@@ -418,9 +418,9 @@
def getstats(self, space):
if self.w_callable is None:
if self.is_enabled:
- raise OperationError(space.w_RuntimeError,
- space.wrap("Profiler instance must be disabled "
- "before getting the stats"))
+ raise oefmt(space.w_RuntimeError,
+ "Profiler instance must be disabled before "
+ "getting the stats")
if self.total_timestamp:
factor = self.total_real_time / float(self.total_timestamp)
else:
diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py
--- a/pypy/module/_multibytecodec/interp_multibytecodec.py
+++ b/pypy/module/_multibytecodec/interp_multibytecodec.py
@@ -1,7 +1,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.module._multibytecodec import c_codecs
from pypy.module._codecs.interp_codecs import CodecState
@@ -57,8 +57,7 @@
try:
codec = c_codecs.getcodec(name)
except KeyError:
- raise OperationError(space.w_LookupError,
- space.wrap("no such codec is supported."))
From pypy.commits at gmail.com Mon May 2 02:33:05 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 23:33:05 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: oefmt pypy/module/!(_*)
Message-ID: <5726f4a1.89cbc20a.a5dd1.330b@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84121:3902fa8f3207
Date: 2016-05-01 23:26 -0700
http://bitbucket.org/pypy/pypy/changeset/3902fa8f3207/
Log: oefmt pypy/module/!(_*)
diff too long, truncating to 2000 out of 3818 lines
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -19,17 +19,16 @@
@unwrap_spec(typecode=str)
def w_array(space, w_cls, typecode, __args__):
if len(__args__.arguments_w) > 1:
- msg = 'array() takes at most 2 arguments'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "array() takes at most 2 arguments")
if len(typecode) != 1:
- msg = 'array() argument 1 must be char, not str'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "array() argument 1 must be char, not str")
typecode = typecode[0]
if space.is_w(w_cls, space.gettypeobject(W_ArrayBase.typedef)):
if __args__.keywords:
- msg = 'array.array() does not take keyword arguments'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "array.array() does not take keyword arguments")
for tc in unroll_typecodes:
if typecode == tc:
@@ -46,8 +45,9 @@
a.extend(w_initializer, True)
break
else:
- msg = 'bad typecode (must be c, b, B, u, h, H, i, I, l, L, f or d)'
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "bad typecode (must be c, b, B, u, h, H, i, I, l, L, f or "
+ "d)")
return a
@@ -209,8 +209,7 @@
Append items to array from list.
"""
if not space.isinstance_w(w_lst, space.w_list):
- raise OperationError(space.w_TypeError,
- space.wrap("arg must be list"))
+ raise oefmt(space.w_TypeError, "arg must be list")
s = self.len
try:
self.fromsequence(w_lst)
@@ -240,8 +239,8 @@
"""
s = space.getarg_w('s#', w_s)
if len(s) % self.itemsize != 0:
- msg = 'string length not a multiple of item size'
- raise OperationError(self.space.w_ValueError, self.space.wrap(msg))
+ raise oefmt(self.space.w_ValueError,
+ "string length not a multiple of item size")
oldlen = self.len
new = len(s) / self.itemsize
if not new:
@@ -271,8 +270,7 @@
if n != 0:
item = item[0:elems]
self.descr_fromstring(space, space.wrap(item))
- msg = "not enough items in file"
- raise OperationError(space.w_EOFError, space.wrap(msg))
+ raise oefmt(space.w_EOFError, "not enough items in file")
self.descr_fromstring(space, w_item)
@unwrap_spec(w_f=W_File)
@@ -301,8 +299,8 @@
if self.typecode == 'u':
self.fromsequence(w_ustr)
else:
- msg = "fromunicode() may only be called on type 'u' arrays"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "fromunicode() may only be called on type 'u' arrays")
def descr_tounicode(self, space):
""" tounicode() -> unicode
@@ -316,8 +314,8 @@
buf = rffi.cast(UNICODE_ARRAY, self._buffer_as_unsigned())
return space.wrap(rffi.wcharpsize2unicode(buf, self.len))
else:
- msg = "tounicode() may only be called on type 'u' arrays"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "tounicode() may only be called on type 'u' arrays")
def descr_buffer_info(self, space):
""" buffer_info() -> (address, length)
@@ -366,8 +364,8 @@
not 1, 2, 4, or 8 bytes in size, RuntimeError is raised.
"""
if self.itemsize not in [1, 2, 4, 8]:
- msg = "byteswap not supported for this array"
- raise OperationError(space.w_RuntimeError, space.wrap(msg))
+ raise oefmt(space.w_RuntimeError,
+ "byteswap not supported for this array")
if self.len == 0:
return
bytes = self._charbuf_start()
@@ -665,15 +663,13 @@
try:
item = item.touint()
except (ValueError, OverflowError):
- msg = 'unsigned %d-byte integer out of range' % \
- mytype.bytes
- raise OperationError(space.w_OverflowError,
- space.wrap(msg))
+ raise oefmt(space.w_OverflowError,
+ "unsigned %d-byte integer out of range",
+ mytype.bytes)
return rffi.cast(mytype.itemtype, item)
if mytype.unwrap == 'str_w' or mytype.unwrap == 'unicode_w':
if len(item) != 1:
- msg = 'array item must be char'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "array item must be char")
item = item[0]
return rffi.cast(mytype.itemtype, item)
#
@@ -816,8 +812,8 @@
self.setlen(oldlen + i)
elif (not accept_different_array
and isinstance(w_iterable, W_ArrayBase)):
- msg = "can only extend with array of same kind"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "can only extend with array of same kind")
else:
self.fromsequence(w_iterable)
@@ -861,8 +857,7 @@
w_item = self.w_getitem(space, i)
if space.is_true(space.eq(w_item, w_val)):
return space.wrap(i)
- msg = 'array.index(x): x not in list'
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError, "array.index(x): x not in list")
def descr_reverse(self, space):
b = self.buffer
@@ -873,8 +868,7 @@
if i < 0:
i += self.len
if i < 0 or i >= self.len:
- msg = 'pop index out of range'
- raise OperationError(space.w_IndexError, space.wrap(msg))
+ raise oefmt(space.w_IndexError, "pop index out of range")
w_val = self.w_getitem(space, i)
while i < self.len - 1:
self.buffer[i] = self.buffer[i + 1]
@@ -916,16 +910,15 @@
def setitem(self, space, w_idx, w_item):
idx, stop, step = space.decode_index(w_idx, self.len)
if step != 0:
- msg = 'can only assign array to array slice'
- raise OperationError(self.space.w_TypeError,
- self.space.wrap(msg))
+ raise oefmt(self.space.w_TypeError,
+ "can only assign array to array slice")
item = self.item_w(w_item)
self.buffer[idx] = item
def setitem_slice(self, space, w_idx, w_item):
if not isinstance(w_item, W_Array):
- raise OperationError(space.w_TypeError, space.wrap(
- "can only assign to a slice array"))
+ raise oefmt(space.w_TypeError,
+ "can only assign to a slice array")
start, stop, step, size = self.space.decode_index4(w_idx, self.len)
assert step != 0
if w_item.len != size or self is w_item:
diff --git a/pypy/module/binascii/interp_hexlify.py b/pypy/module/binascii/interp_hexlify.py
--- a/pypy/module/binascii/interp_hexlify.py
+++ b/pypy/module/binascii/interp_hexlify.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rarithmetic import ovfcheck
@@ -38,8 +38,7 @@
elif c <= 'f':
if c >= 'a':
return ord(c) - (ord('a')-10)
- raise OperationError(space.w_TypeError,
- space.wrap('Non-hexadecimal digit found'))
+ raise oefmt(space.w_TypeError, "Non-hexadecimal digit found")
_char2value._always_inline_ = True
@unwrap_spec(hexstr='bufferstr')
@@ -48,8 +47,7 @@
hexstr must contain an even number of hex digits (upper or lower case).
This function is also available as "unhexlify()".'''
if len(hexstr) & 1:
- raise OperationError(space.w_TypeError,
- space.wrap('Odd-length string'))
+ raise oefmt(space.w_TypeError, "Odd-length string")
res = StringBuilder(len(hexstr) >> 1)
for i in range(0, len(hexstr), 2):
a = _char2value(space, hexstr[i])
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -154,24 +154,24 @@
def _catch_bz2_error(space, bzerror):
if BZ_CONFIG_ERROR and bzerror == BZ_CONFIG_ERROR:
- raise OperationError(space.w_SystemError,
- space.wrap("the bz2 library was not compiled correctly"))
+ raise oefmt(space.w_SystemError,
+ "the bz2 library was not compiled correctly")
if bzerror == BZ_PARAM_ERROR:
- raise OperationError(space.w_SystemError,
- space.wrap("the bz2 library has received wrong parameters"))
+ raise oefmt(space.w_SystemError,
+ "the bz2 library has received wrong parameters")
elif bzerror == BZ_MEM_ERROR:
raise OperationError(space.w_MemoryError, space.wrap(""))
elif bzerror in (BZ_DATA_ERROR, BZ_DATA_ERROR_MAGIC):
- raise OperationError(space.w_IOError, space.wrap("invalid data stream"))
+ raise oefmt(space.w_IOError, "invalid data stream")
elif bzerror == BZ_IO_ERROR:
- raise OperationError(space.w_IOError, space.wrap("unknown IO error"))
+ raise oefmt(space.w_IOError, "unknown IO error")
elif bzerror == BZ_UNEXPECTED_EOF:
- raise OperationError(space.w_EOFError,
- space.wrap(
- "compressed file ended before the logical end-of-stream was detected"))
+ raise oefmt(space.w_EOFError,
+ "compressed file ended before the logical end-of-stream "
+ "was detected")
elif bzerror == BZ_SEQUENCE_ERROR:
- raise OperationError(space.w_RuntimeError,
- space.wrap("wrong sequence of bz2 library commands used"))
+ raise oefmt(space.w_RuntimeError,
+ "wrong sequence of bz2 library commands used")
def _new_buffer_size(current_size):
# keep doubling until we reach BIGCHUNK; then the buffer size is no
@@ -326,11 +326,9 @@
from rpython.rlib.streamio import construct_stream_tower
os_flags, universal, reading, writing, basemode, binary = decode_mode(mode)
if reading and writing:
- raise OperationError(space.w_ValueError,
- space.wrap("cannot open in read-write mode"))
+ raise oefmt(space.w_ValueError, "cannot open in read-write mode")
if basemode == "a":
- raise OperationError(space.w_ValueError,
- space.wrap("cannot append to bz2 file"))
+ raise oefmt(space.w_ValueError, "cannot append to bz2 file")
stream = open_path_helper(space.str0_w(w_path), os_flags, False)
if reading:
bz2stream = ReadBZ2Filter(space, stream, buffering)
@@ -413,8 +411,9 @@
if raw:
w_result = self.decompressor.decompress(raw)
if self.decompressor.running:
- raise OperationError(self.space.w_EOFError,
- self.space.wrap("compressed file ended before the logical end-of-the-stream was detected"))
+ raise oefmt(self.space.w_EOFError,
+ "compressed file ended before the logical "
+ "end-of-the-stream was detected")
result = self.space.str_w(w_result)
self.readlength += len(result)
else:
@@ -468,8 +467,7 @@
return self.stream.try_to_find_file_descriptor()
def write(self, s):
- raise OperationError(self.space.w_IOError,
- self.space.wrap("file is not ready for writing"))
+ raise oefmt(self.space.w_IOError, "file is not ready for writing")
class WriteBZ2Filter(Stream):
"""Standard I/O stream filter that compresses the stream with bz2."""
@@ -492,16 +490,13 @@
return self.writtenlength
def seek(self, offset, whence):
- raise OperationError(self.space.w_IOError,
- self.space.wrap("seek works only while reading"))
+ raise oefmt(self.space.w_IOError, "seek works only while reading")
def read(self, n):
- raise OperationError(self.space.w_IOError,
- self.space.wrap("file is not ready for reading"))
+ raise oefmt(self.space.w_IOError, "file is not ready for reading")
def readall(self):
- raise OperationError(self.space.w_IOError,
- self.space.wrap("file is not ready for reading"))
+ raise oefmt(self.space.w_IOError, "file is not ready for reading")
def try_to_find_file_descriptor(self):
return self.stream.try_to_find_file_descriptor()
@@ -528,8 +523,8 @@
def _init_bz2comp(self, compresslevel):
if compresslevel < 1 or compresslevel > 9:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("compresslevel must be between 1 and 9"))
+ raise oefmt(self.space.w_ValueError,
+ "compresslevel must be between 1 and 9")
bzerror = intmask(BZ2_bzCompressInit(self.bzs, compresslevel, 0, 0))
if bzerror != BZ_OK:
@@ -556,8 +551,8 @@
return self.space.wrap("")
if not self.running:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("this object was already flushed"))
+ raise oefmt(self.space.w_ValueError,
+ "this object was already flushed")
in_bufsize = datasize
@@ -582,8 +577,8 @@
def flush(self):
if not self.running:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("this object was already flushed"))
+ raise oefmt(self.space.w_ValueError,
+ "this object was already flushed")
self.running = False
with OutBuffer(self.bzs) as out:
@@ -653,8 +648,8 @@
unused_data attribute."""
if not self.running:
- raise OperationError(self.space.w_EOFError,
- self.space.wrap("end of stream was already found"))
+ raise oefmt(self.space.w_EOFError,
+ "end of stream was already found")
if data == '':
return self.space.wrap('')
@@ -705,8 +700,8 @@
given, must be a number between 1 and 9."""
if compresslevel < 1 or compresslevel > 9:
- raise OperationError(space.w_ValueError,
- space.wrap("compresslevel must be between 1 and 9"))
+ raise oefmt(space.w_ValueError,
+ "compresslevel must be between 1 and 9")
with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
in_bufsize = len(data)
@@ -770,8 +765,8 @@
if rffi.getintfield(bzs, 'c_avail_in') == 0:
BZ2_bzDecompressEnd(bzs)
- raise OperationError(space.w_ValueError, space.wrap(
- "couldn't find end of stream"))
+ raise oefmt(space.w_ValueError,
+ "couldn't find end of stream")
elif rffi.getintfield(bzs, 'c_avail_out') == 0:
out.prepare_next_chunk()
diff --git a/pypy/module/cStringIO/interp_stringio.py b/pypy/module/cStringIO/interp_stringio.py
--- a/pypy/module/cStringIO/interp_stringio.py
+++ b/pypy/module/cStringIO/interp_stringio.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec
@@ -19,8 +19,7 @@
def check_closed(self):
if self.is_closed():
space = self.space
- raise OperationError(space.w_ValueError,
- space.wrap("I/O operation on closed file"))
+ raise oefmt(space.w_ValueError, "I/O operation on closed file")
def descr_flush(self):
self.check_closed()
@@ -160,7 +159,7 @@
else:
size = space.int_w(w_size)
if size < 0:
- raise OperationError(space.w_IOError, space.wrap("negative size"))
+ raise oefmt(space.w_IOError, "negative size")
self.truncate(size)
def descr_write(self, space, w_buffer):
diff --git a/pypy/module/cmath/interp_cmath.py b/pypy/module/cmath/interp_cmath.py
--- a/pypy/module/cmath/interp_cmath.py
+++ b/pypy/module/cmath/interp_cmath.py
@@ -1,7 +1,7 @@
import math
from rpython.rlib.objectmodel import specialize
from rpython.tool.sourcetools import func_with_new_name
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cmath import names_and_docstrings
from rpython.rlib import rcomplex
@@ -14,11 +14,9 @@
try:
result = c_func(x, y)
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("math domain error"))
+ raise oefmt(space.w_ValueError, "math domain error")
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("math range error"))
+ raise oefmt(space.w_OverflowError, "math range error")
return result
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -3,7 +3,7 @@
from rpython.rlib.rarithmetic import r_singlefloat
from rpython.tool import leakfinder
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc
@@ -240,8 +240,8 @@
load_reflection_library(space)
except Exception:
if objectmodel.we_are_translated():
- raise OperationError(space.w_ImportError,
- space.wrap("missing reflection library %s" % reflection_library))
+ raise oefmt(space.w_ImportError,
+ "missing reflection library %s", reflection_library)
return False
return True
diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py
--- a/pypy/module/cppyy/converter.py
+++ b/pypy/module/cppyy/converter.py
@@ -100,7 +100,8 @@
return fieldptr
def _is_abstract(self, space):
- raise OperationError(space.w_TypeError, space.wrap("no converter available for '%s'" % self.name))
+ raise oefmt(space.w_TypeError,
+ "no converter available for '%s'", self.name)
def convert_argument(self, space, w_obj, address, call_local):
self._is_abstract(space)
@@ -181,14 +182,15 @@
def convert_argument(self, space, w_obj, address, call_local):
w_tc = space.findattr(w_obj, space.wrap('typecode'))
if w_tc is not None and space.str_w(w_tc) != self.typecode:
- msg = "expected %s pointer type, but received %s" % (self.typecode, space.str_w(w_tc))
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "expected %s pointer type, but received %s",
+ self.typecode, space.str_w(w_tc))
x = rffi.cast(rffi.VOIDPP, address)
try:
x[0] = rffi.cast(rffi.VOIDP, get_rawbuffer(space, w_obj))
except TypeError:
- raise OperationError(space.w_TypeError,
- space.wrap("raw buffer interface not supported"))
+ raise oefmt(space.w_TypeError,
+ "raw buffer interface not supported")
ba = rffi.cast(rffi.CCHARP, address)
ba[capi.c_function_arg_typeoffset(space)] = 'o'
@@ -208,8 +210,8 @@
try:
byteptr[0] = buf.get_raw_address()
except ValueError:
- raise OperationError(space.w_TypeError,
- space.wrap("raw buffer interface not supported"))
+ raise oefmt(space.w_TypeError,
+ "raw buffer interface not supported")
class NumericTypeConverterMixin(object):
@@ -464,8 +466,8 @@
offset = capi.c_base_offset(space, w_obj.cppclass, self.cppclass, rawobject, 1)
obj_address = capi.direct_ptradd(rawobject, offset)
return rffi.cast(capi.C_OBJECT, obj_address)
- raise oefmt(space.w_TypeError, "cannot pass %T as %s",
- w_obj, self.cppclass.name)
+ raise oefmt(space.w_TypeError,
+ "cannot pass %T as %s", w_obj, self.cppclass.name)
def convert_argument(self, space, w_obj, address, call_local):
x = rffi.cast(rffi.VOIDPP, address)
diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py
--- a/pypy/module/cppyy/executor.py
+++ b/pypy/module/cppyy/executor.py
@@ -1,6 +1,6 @@
import sys
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import jit_libffi
@@ -35,8 +35,8 @@
pass
def execute(self, space, cppmethod, cppthis, num_args, args):
- raise OperationError(space.w_TypeError,
- space.wrap('return type not available or supported'))
+ raise oefmt(space.w_TypeError,
+ "return type not available or supported")
def execute_libffi(self, space, cif_descr, funcaddr, buffer):
from pypy.module.cppyy.interp_cppyy import FastCallNotPossible
diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py
--- a/pypy/module/cppyy/ffitypes.py
+++ b/pypy/module/cppyy/ffitypes.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi
from rpython.rlib.rarithmetic import r_singlefloat
@@ -21,8 +21,8 @@
def _unwrap_object(self, space, w_obj):
arg = space.c_int_w(w_obj)
if arg != False and arg != True:
- raise OperationError(space.w_ValueError,
- space.wrap("boolean value should be bool, or integer 1 or 0"))
+ raise oefmt(space.w_ValueError,
+ "boolean value should be bool, or integer 1 or 0")
return arg
def _wrap_object(self, space, obj):
@@ -41,16 +41,15 @@
if space.isinstance_w(w_value, space.w_int):
ival = space.c_int_w(w_value)
if ival < 0 or 256 <= ival:
- raise OperationError(space.w_ValueError,
- space.wrap("char arg not in range(256)"))
+ raise oefmt(space.w_ValueError, "char arg not in range(256)")
value = rffi.cast(rffi.CHAR, space.c_int_w(w_value))
else:
value = space.str_w(w_value)
if len(value) != 1:
- raise OperationError(space.w_ValueError,
- space.wrap("char expected, got string of size %d" % len(value)))
+ raise oefmt(space.w_ValueError,
+ "char expected, got string of size %d", len(value))
return value[0] # turn it into a "char" to the annotator
class ShortTypeMixin(object):
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -1,6 +1,6 @@
import pypy.module.cppyy.capi as capi
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from pypy.interpreter.baseobjspace import W_Root
@@ -195,8 +195,7 @@
args_expected = len(self.arg_defs)
args_given = len(args_w)
if args_expected < args_given or args_given < self.args_required:
- raise OperationError(self.space.w_TypeError,
- self.space.wrap("wrong number of arguments"))
+ raise oefmt(self.space.w_TypeError, "wrong number of arguments")
# initial setup of converters, executors, and libffi (if available)
if self.converters is None:
@@ -435,8 +434,9 @@
s = self.space.str_w(self.space.getattr(args_w[i], self.space.wrap('__name__')))
s = capi.c_resolve_name(self.space, s)
if s != self.templ_args[i]:
- raise OperationError(self.space.w_TypeError, self.space.wrap(
- "non-matching template (got %s where %s expected)" % (s, self.templ_args[i])))
+ raise oefmt(self.space.w_TypeError,
+ "non-matching template (got %s where %s expected)",
+ s, self.templ_args[i])
return W_CPPBoundMethod(cppthis, self)
def bound_call(self, cppthis, args_w):
@@ -646,14 +646,16 @@
def get(self, w_cppinstance, w_pycppclass):
cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True)
if not cppinstance:
- raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance"))
+ raise oefmt(self.space.w_ReferenceError,
+ "attribute access requires an instance")
offset = self._get_offset(cppinstance)
return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset)
def set(self, w_cppinstance, w_value):
cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True)
if not cppinstance:
- raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance"))
+ raise oefmt(self.space.w_ReferenceError,
+ "attribute access requires an instance")
offset = self._get_offset(cppinstance)
self.converter.to_memory(self.space, w_cppinstance, w_value, offset)
return self.space.w_None
@@ -777,12 +779,12 @@
for f in overload.functions:
if 0 < f.signature().find(sig):
return W_CPPOverload(self.space, self, [f])
- raise OperationError(self.space.w_TypeError, self.space.wrap("no overload matches signature"))
+ raise oefmt(self.space.w_TypeError, "no overload matches signature")
def missing_attribute_error(self, name):
- return OperationError(
- self.space.w_AttributeError,
- self.space.wrap("%s '%s' has no attribute %s" % (self.kind, self.name, name)))
+ return oefmt(self.space.w_AttributeError,
+ "%s '%s' has no attribute %s",
+ self.kind, self.name, name)
def __eq__(self, other):
return self.handle == other.handle
@@ -1033,8 +1035,8 @@
def _nullcheck(self):
if not self._rawobject or (self.isref and not self.get_rawobject()):
- raise OperationError(self.space.w_ReferenceError,
- self.space.wrap("trying to access a NULL pointer"))
+ raise oefmt(self.space.w_ReferenceError,
+ "trying to access a NULL pointer")
# allow user to determine ownership rules on a per object level
def fget_python_owns(self, space):
@@ -1072,8 +1074,9 @@
except OperationError, e:
if not e.match(self.space, self.space.w_AttributeError):
raise
- raise OperationError(self.space.w_TypeError,
- self.space.wrap("cannot instantiate abstract class '%s'" % self.cppclass.name))
+ raise oefmt(self.space.w_TypeError,
+ "cannot instantiate abstract class '%s'",
+ self.cppclass.name)
def instance__eq__(self, w_other):
# special case: if other is None, compare pointer-style
@@ -1122,17 +1125,15 @@
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.len(w_as_builtin)
- raise OperationError(
- self.space.w_TypeError,
- self.space.wrap("'%s' has no length" % self.cppclass.name))
+ raise oefmt(self.space.w_TypeError,
+ "'%s' has no length", self.cppclass.name)
def instance__cmp__(self, w_other):
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.cmp(w_as_builtin, w_other)
- raise OperationError(
- self.space.w_AttributeError,
- self.space.wrap("'%s' has no attribute __cmp__" % self.cppclass.name))
+ raise oefmt(self.space.w_AttributeError,
+ "'%s' has no attribute __cmp__", self.cppclass.name)
def instance__repr__(self):
w_as_builtin = self._get_as_builtin()
@@ -1278,7 +1279,7 @@
if not w_cppclass:
w_cppclass = scope_byname(space, space.str_w(w_pycppclass))
if not w_cppclass:
- raise OperationError(space.w_TypeError,
- space.wrap("no such class: %s" % space.str_w(w_pycppclass)))
+ raise oefmt(space.w_TypeError,
+ "no such class: %s", space.str_w(w_pycppclass))
cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False)
return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, python_owns=owns)
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -385,9 +385,8 @@
## arg = from_ref(space,
## rffi.cast(PyObject, input_arg))
## except TypeError, e:
- ## err = OperationError(space.w_TypeError,
- ## space.wrap(
- ## "could not cast arg to PyObject"))
+ ## err = oefmt(space.w_TypeError,
+ ## "could not cast arg to PyObject")
## if not catch_exception:
## raise err
## state = space.fromcache(State)
@@ -1644,11 +1643,13 @@
has_error = PyErr_Occurred(space) is not None
has_result = ret is not None
if has_error and has_result:
- raise OperationError(space.w_SystemError, space.wrap(
- "An exception was set, but function returned a value"))
+ raise oefmt(space.w_SystemError,
+ "An exception was set, but function returned a "
+ "value")
elif not expect_null and not has_error and not has_result:
- raise OperationError(space.w_SystemError, space.wrap(
- "Function returned a NULL result without setting an exception"))
+ raise oefmt(space.w_SystemError,
+ "Function returned a NULL result without setting "
+ "an exception")
if has_error:
state = space.fromcache(State)
diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py
--- a/pypy/module/cpyext/buffer.py
+++ b/pypy/module/cpyext/buffer.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
cpython_api, CANNOT_FAIL, Py_buffer)
@@ -29,8 +29,8 @@
raise an error if the object can't support a simpler view of its memory.
0 is returned on success and -1 on error."""
- raise OperationError(space.w_TypeError, space.wrap(
- 'PyPy does not yet implement the new buffer interface'))
+ raise oefmt(space.w_TypeError,
+ "PyPy does not yet implement the new buffer interface")
@cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL)
def PyBuffer_IsContiguous(space, view, fortran):
diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py
--- a/pypy/module/cpyext/bufferobject.py
+++ b/pypy/module/cpyext/bufferobject.py
@@ -1,6 +1,6 @@
from rpython.rlib.buffer import StringBuffer, SubBuffer
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import (
cpython_api, Py_ssize_t, cpython_struct, bootstrap_function,
PyObjectFields, PyObject)
@@ -61,16 +61,15 @@
py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, buf.array._charbuf_start())
py_buf.c_b_size = buf.getlength()
else:
- raise OperationError(space.w_NotImplementedError, space.wrap(
- "buffer flavor not supported"))
+ raise oefmt(space.w_NotImplementedError, "buffer flavor not supported")
def buffer_realize(space, py_obj):
"""
Creates the buffer in the PyPy interpreter from a cpyext representation.
"""
- raise OperationError(space.w_NotImplementedError, space.wrap(
- "Don't know how to realize a buffer"))
+ raise oefmt(space.w_NotImplementedError,
+ "Don't know how to realize a buffer")
@cpython_api([PyObject], lltype.Void, header=None)
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
cpython_api, cpython_struct, bootstrap_function, build_type_checkers,
@@ -183,8 +183,8 @@
while ref_str.c_buffer[i] != '\0':
i += 1
if i != ref_str.c_ob_size:
- raise OperationError(space.w_TypeError, space.wrap(
- "expected string without null bytes"))
+ raise oefmt(space.w_TypeError,
+ "expected string without null bytes")
return 0
@cpython_api([PyObject], Py_ssize_t, error=-1)
@@ -211,8 +211,8 @@
# XXX always create a new string so far
py_str = rffi.cast(PyStringObject, ref[0])
if not py_str.c_buffer:
- raise OperationError(space.w_SystemError, space.wrap(
- "_PyString_Resize called on already created string"))
+ raise oefmt(space.w_SystemError,
+ "_PyString_Resize called on already created string")
try:
py_newstr = new_empty_str(space, newsize)
except MemoryError:
diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py
--- a/pypy/module/cpyext/complexobject.py
+++ b/pypy/module/cpyext/complexobject.py
@@ -5,7 +5,7 @@
make_typedescr, track_reference, from_ref)
from pypy.module.cpyext.floatobject import PyFloat_AsDouble
from pypy.objspace.std.complexobject import W_ComplexObject
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
PyComplex_Check, PyComplex_CheckExact = build_type_checkers("Complex")
@@ -98,8 +98,8 @@
return 0
if not PyComplex_Check(space, w_obj):
- raise OperationError(space.w_TypeError, space.wrap(
- "__complex__ should return a complex object"))
+ raise oefmt(space.w_TypeError,
+ "__complex__ should return a complex object")
assert isinstance(w_obj, W_ComplexObject)
result.c_real = w_obj.realval
diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py
--- a/pypy/module/cpyext/eval.py
+++ b/pypy/module/cpyext/eval.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.astcompiler import consts
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
@@ -103,8 +103,8 @@
elif start == Py_single_input:
mode = 'single'
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid mode parameter for compilation"))
+ raise oefmt(space.w_ValueError,
+ "invalid mode parameter for compilation")
return compiling.compile(space, w_source, filename, mode, flags)
def run_string(space, source, filename, start, w_globals, w_locals):
diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py
--- a/pypy/module/cpyext/intobject.py
+++ b/pypy/module/cpyext/intobject.py
@@ -1,6 +1,6 @@
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import (
cpython_api, cpython_struct, build_type_checkers, bootstrap_function,
PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t)
@@ -62,8 +62,7 @@
returned, and the caller should check PyErr_Occurred() to find out whether
there was an error, or whether the value just happened to be -1."""
if w_obj is None:
- raise OperationError(space.w_TypeError,
- space.wrap("an integer is required, got NULL"))
+ raise oefmt(space.w_TypeError, "an integer is required, got NULL")
return space.int_w(space.int(w_obj))
@cpython_api([PyObject], lltype.Unsigned, error=-1)
@@ -72,8 +71,7 @@
If pylong is greater than ULONG_MAX, an OverflowError is
raised."""
if w_obj is None:
- raise OperationError(space.w_TypeError,
- space.wrap("an integer is required, got NULL"))
+ raise oefmt(space.w_TypeError, "an integer is required, got NULL")
return space.uint_w(space.int(w_obj))
@@ -118,8 +116,7 @@
Py_ssize_t.
"""
if w_obj is None:
- raise OperationError(space.w_TypeError,
- space.wrap("an integer is required, got NULL"))
+ raise oefmt(space.w_TypeError, "an integer is required, got NULL")
return space.int_w(w_obj) # XXX this is wrong on win64
LONG_MAX = int(LONG_TEST - 1)
diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py
--- a/pypy/module/cpyext/listobject.py
+++ b/pypy/module/cpyext/listobject.py
@@ -5,7 +5,7 @@
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, make_ref
from pypy.objspace.std.listobject import W_ListObject
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
PyList_Check, PyList_CheckExact = build_type_checkers("List")
@@ -52,8 +52,7 @@
if not isinstance(w_list, W_ListObject):
PyErr_BadInternalCall(space)
if index < 0 or index >= w_list.length():
- raise OperationError(space.w_IndexError, space.wrap(
- "list assignment index out of range"))
+ raise oefmt(space.w_IndexError, "list assignment index out of range")
w_list.setitem(index, w_item)
return 0
@@ -66,8 +65,7 @@
if not isinstance(w_list, W_ListObject):
PyErr_BadInternalCall(space)
if index < 0 or index >= w_list.length():
- raise OperationError(space.w_IndexError, space.wrap(
- "list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
w_list.ensure_object_strategy() # make sure we can return a borrowed obj
# XXX ^^^ how does this interact with CPyListStrategy?
w_res = w_list.getitem(index)
@@ -103,8 +101,7 @@
len(list) on a list object.
"""
if not PyList_Check(space, ref):
- raise OperationError(space.w_TypeError,
- space.wrap("expected list object"))
+ raise oefmt(space.w_TypeError, "expected list object")
return PyList_GET_SIZE(space, ref)
@cpython_api([PyObject], PyObject)
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -73,8 +73,8 @@
flags = rffi.cast(lltype.Signed, self.ml.c_ml_flags)
flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST)
if space.is_true(w_kw) and not flags & METH_KEYWORDS:
- raise OperationError(space.w_TypeError, space.wrap(
- self.name + "() takes no keyword arguments"))
+ raise oefmt(space.w_TypeError,
+ "%s() takes no keyword arguments", self.name)
func = rffi.cast(PyCFunction, self.ml.c_ml_meth)
length = space.int_w(space.len(w_args))
@@ -84,8 +84,8 @@
elif flags & METH_NOARGS:
if length == 0:
return generic_cpy_call(space, func, w_self, None)
- raise OperationError(space.w_TypeError, space.wrap(
- self.name + "() takes no arguments"))
+ raise oefmt(space.w_TypeError,
+ "%s() takes no arguments", self.name)
elif flags & METH_O:
if length != 1:
raise oefmt(space.w_TypeError,
@@ -280,7 +280,8 @@
cfunction = space.interp_w(W_PyCFunctionObject, w_obj)
except OperationError, e:
if e.match(space, space.w_TypeError):
- raise oefmt(space.w_SystemError, "bad argument to internal function")
+ raise oefmt(space.w_SystemError,
+ "bad argument to internal function")
raise
return cfunction.ml.c_ml_meth
diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py
--- a/pypy/module/cpyext/modsupport.py
+++ b/pypy/module/cpyext/modsupport.py
@@ -8,7 +8,7 @@
PyMethodDef, PyDescr_NewClassMethod, PyStaticMethod_New)
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
from pypy.module.cpyext.state import State
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
#@cpython_api([rffi.CCHARP], PyObject)
def PyImport_AddModule(space, name):
@@ -87,16 +87,17 @@
if w_type is None:
if flags & METH_CLASS or flags & METH_STATIC:
- raise OperationError(space.w_ValueError,
- space.wrap("module functions cannot set METH_CLASS or METH_STATIC"))
+ raise oefmt(space.w_ValueError,
+ "module functions cannot set METH_CLASS or "
+ "METH_STATIC")
w_obj = space.wrap(W_PyCFunctionObject(space, method, w_self, w_name))
else:
if methodname in dict_w and not (flags & METH_COEXIST):
continue
if flags & METH_CLASS:
if flags & METH_STATIC:
- raise OperationError(space.w_ValueError,
- space.wrap("method cannot be both class and static"))
+ raise oefmt(space.w_ValueError,
+ "method cannot be both class and static")
w_obj = PyDescr_NewClassMethod(space, w_type, method)
elif flags & METH_STATIC:
w_func = PyCFunction_NewEx(space, method, None, None)
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -3,7 +3,7 @@
Numpy C-API for PyPy - S. H. Muller, 2013/07/26
"""
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL
from pypy.module.cpyext.api import PyObject
@@ -126,15 +126,16 @@
parameter is NULL.
"""
if requirements not in (0, ARRAY_DEFAULT):
- raise OperationError(space.w_NotImplementedError, space.wrap(
- '_PyArray_FromAny called with not-implemented requirements argument'))
+ raise oefmt(space.w_NotImplementedError,
+ "_PyArray_FromAny called with not-implemented "
+ "requirements argument")
w_array = array(space, w_obj, w_dtype=w_dtype, copy=False)
if min_depth !=0 and len(w_array.get_shape()) < min_depth:
- raise OperationError(space.w_ValueError, space.wrap(
- 'object of too small depth for desired array'))
+ raise oefmt(space.w_ValueError,
+ "object of too small depth for desired array")
elif max_depth !=0 and len(w_array.get_shape()) > max_depth:
- raise OperationError(space.w_ValueError, space.wrap(
- 'object of too deep for desired array'))
+ raise oefmt(space.w_ValueError,
+ "object of too deep for desired array")
elif w_array.is_scalar():
# since PyArray_DATA() fails on scalars, create a 1D array and set empty
# shape. So the following combination works for *reading* scalars:
@@ -153,25 +154,26 @@
dtype = get_dtype_cache(space).dtypes_by_num[typenum]
return dtype
except KeyError:
- raise OperationError(space.w_ValueError, space.wrap(
- 'PyArray_DescrFromType called with invalid dtype %d' % typenum))
+ raise oefmt(space.w_ValueError,
+ "PyArray_DescrFromType called with invalid dtype %d",
+ typenum)
@cpython_api([PyObject, Py_ssize_t, Py_ssize_t, Py_ssize_t], PyObject, header=HEADER)
def _PyArray_FromObject(space, w_obj, typenum, min_depth, max_depth):
try:
dtype = get_dtype_cache(space).dtypes_by_num[typenum]
except KeyError:
- raise OperationError(space.w_ValueError, space.wrap(
- '_PyArray_FromObject called with invalid dtype %d' % typenum))
+ raise oefmt(space.w_ValueError,
+ "_PyArray_FromObject called with invalid dtype %d",
+ typenum)
try:
return _PyArray_FromAny(space, w_obj, dtype, min_depth, max_depth,
0, NULL);
except OperationError, e:
if e.match(space, space.w_NotImplementedError):
errstr = space.str_w(e.get_w_value(space))
- errstr = '_PyArray_FromObject' + errstr[16:]
- raise OperationError(space.w_NotImplementedError, space.wrap(
- errstr))
+ raise oefmt(space.w_NotImplementedError,
+ "_PyArray_FromObject%s", errstr[16:])
raise
def get_shape_and_dtype(space, nd, dims, typenum):
@@ -214,8 +216,7 @@
rffi.VOIDP, Py_ssize_t, Py_ssize_t, PyObject], PyObject, header=HEADER)
def _PyArray_New(space, subtype, nd, dims, typenum, strides, data, itemsize, flags, obj):
if strides:
- raise OperationError(space.w_NotImplementedError,
- space.wrap("strides must be NULL"))
+ raise oefmt(space.w_NotImplementedError, "strides must be NULL")
order = CORDER if flags & ARRAY_C_CONTIGUOUS else FORTRANORDER
owning = True if flags & ARRAY_OWNDATA else False
diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py
--- a/pypy/module/cpyext/number.py
+++ b/pypy/module/cpyext/number.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, Py_ssize_t
from pypy.module.cpyext.pyobject import PyObject, PyObjectP, from_ref, make_ref, Py_DecRef
from rpython.rtyper.lltypesystem import rffi, lltype
@@ -154,7 +154,8 @@
@cpython_api([PyObject, PyObject, PyObject], PyObject)
def PyNumber_InPlacePower(space, w_o1, w_o2, w_o3):
if not space.is_w(w_o3, space.w_None):
- raise OperationError(space.w_ValueError, space.wrap(
- "PyNumber_InPlacePower with non-None modulus is not supported"))
+ raise oefmt(space.w_ValueError,
+ "PyNumber_InPlacePower with non-None modulus is not "
+ "supported")
return space.inplace_pow(w_o1, w_o2)
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -10,7 +10,7 @@
from pypy.module.cpyext.typeobject import PyTypeObjectPtr
from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall
from pypy.objspace.std.typeobject import W_TypeObject
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
import pypy.module.__builtin__.operation as operation
@@ -382,17 +382,15 @@
try:
w_meth = space.getattr(w_obj, space.wrap('fileno'))
except OperationError:
- raise OperationError(
- space.w_TypeError, space.wrap(
- "argument must be an int, or have a fileno() method."))
+ raise oefmt(space.w_TypeError,
+ "argument must be an int, or have a fileno() method.")
else:
w_fd = space.call_function(w_meth)
fd = space.int_w(w_fd)
if fd < 0:
- raise OperationError(
- space.w_ValueError, space.wrap(
- "file descriptor cannot be a negative integer"))
+ raise oefmt(space.w_ValueError,
+ "file descriptor cannot be a negative integer")
return rffi.cast(rffi.INT_real, fd)
@@ -415,7 +413,7 @@
allowing a type to explicitly indicate to the interpreter that it is not
hashable.
"""
- raise OperationError(space.w_TypeError, space.wrap("unhashable type"))
+ raise oefmt(space.w_TypeError, "unhashable type")
@cpython_api([PyObject], PyObject)
def PyObject_Dir(space, w_o):
@@ -438,12 +436,11 @@
pb = pto.c_tp_as_buffer
if not (pb and pb.c_bf_getreadbuffer and pb.c_bf_getsegcount):
- raise OperationError(space.w_TypeError, space.wrap(
- "expected a character buffer object"))
+ raise oefmt(space.w_TypeError, "expected a character buffer object")
if generic_cpy_call(space, pb.c_bf_getsegcount,
obj, lltype.nullptr(Py_ssize_tP.TO)) != 1:
- raise OperationError(space.w_TypeError, space.wrap(
- "expected a single-segment buffer object"))
+ raise oefmt(space.w_TypeError,
+ "expected a single-segment buffer object")
size = generic_cpy_call(space, pb.c_bf_getcharbuffer,
obj, 0, bufferp)
if size < 0:
@@ -486,9 +483,7 @@
provides a subset of CPython's behavior.
"""
if flags & PyBUF_WRITABLE and readonly:
- raise OperationError(
- space.w_ValueError, space.wrap(
- "Object is not writable"))
+ raise oefmt(space.w_ValueError, "Object is not writable")
view.c_buf = buf
view.c_len = length
view.c_obj = obj
diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py
--- a/pypy/module/cpyext/pyerrors.py
+++ b/pypy/module/cpyext/pyerrors.py
@@ -1,7 +1,7 @@
import os
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter import pytraceback
from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, CONST_STRING
from pypy.module.exceptions.interp_exceptions import W_RuntimeWarning
@@ -110,12 +110,11 @@
argument. It is mostly for internal use. In CPython this function always
raises an exception and returns 0 in all cases, hence the (ab)use of the
error indicator."""
- raise OperationError(space.w_TypeError,
- space.wrap("bad argument type for built-in operation"))
+ raise oefmt(space.w_TypeError, "bad argument type for built-in operation")
@cpython_api([], lltype.Void)
def PyErr_BadInternalCall(space):
- raise OperationError(space.w_SystemError, space.wrap("Bad internal call!"))
+ raise oefmt(space.w_SystemError, "Bad internal call!")
@cpython_api([], PyObject, error=CANNOT_FAIL)
def PyErr_NoMemory(space):
diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py
--- a/pypy/module/cpyext/pystrtod.py
+++ b/pypy/module/cpyext/pystrtod.py
@@ -1,5 +1,5 @@
import errno
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import cpython_api, CONST_STRING
from pypy.module.cpyext.pyobject import PyObject
from rpython.rlib import rdtoa
@@ -63,9 +63,8 @@
endpos = (rffi.cast(rffi.LONG, endptr[0]) -
rffi.cast(rffi.LONG, s))
if endpos == 0 or (not user_endptr and not endptr[0][0] == '\0'):
- raise OperationError(
- space.w_ValueError,
- space.wrap('invalid input at position %s' % endpos))
+ raise oefmt(space.w_ValueError,
+ "invalid input at position %d", endpos)
err = rffi.cast(lltype.Signed, rposix._get_errno())
if err == errno.ERANGE:
rposix._set_errno(rffi.cast(rffi.INT, 0))
@@ -75,8 +74,7 @@
else:
return -rfloat.INFINITY
else:
- raise OperationError(w_overflow_exception,
- space.wrap('value too large'))
+ raise oefmt(w_overflow_exception, "value too large")
return result
finally:
if not user_endptr:
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -63,8 +63,9 @@
return w_obj.getitem(index)
elif isinstance(w_obj, tupleobject.W_TupleObject):
return w_obj.wrappeditems[index]
- raise OperationError(space.w_TypeError, space.wrap(
- 'PySequence_Fast_GET_ITEM called but object is not a list or sequence'))
+ raise oefmt(space.w_TypeError,
+ "PySequence_Fast_GET_ITEM called but object is not a list or "
+ "sequence")
@cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
def PySequence_Fast_GET_SIZE(space, w_obj):
@@ -77,8 +78,9 @@
return w_obj.length()
elif isinstance(w_obj, tupleobject.W_TupleObject):
return len(w_obj.wrappeditems)
- raise OperationError(space.w_TypeError, space.wrap(
- 'PySequence_Fast_GET_SIZE called but object is not a list or sequence'))
+ raise oefmt(space.w_TypeError,
+ "PySequence_Fast_GET_SIZE called but object is not a list or "
+ "sequence")
@cpython_api([PyObject], PyObjectP)
def PySequence_Fast_ITEMS(space, w_obj):
@@ -93,8 +95,9 @@
cpy_strategy = space.fromcache(CPyListStrategy)
if w_obj.strategy is cpy_strategy:
return w_obj.get_raw_items() # asserts it's a cpyext strategy
- raise OperationError(space.w_TypeError, space.wrap(
- 'PySequence_Fast_ITEMS called but object is not the result of PySequence_Fast'))
+ raise oefmt(space.w_TypeError,
+ "PySequence_Fast_ITEMS called but object is not the result of "
+ "PySequence_Fast")
@cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject)
def PySequence_GetSlice(space, w_obj, start, end):
@@ -227,8 +230,7 @@
return idx
idx += 1
- raise OperationError(space.w_ValueError, space.wrap(
- "sequence.index(x): x not in sequence"))
+ raise oefmt(space.w_ValueError, "sequence.index(x): x not in sequence")
class CPyListStrategy(ListStrategy):
erase, unerase = rerased.new_erasing_pair("empty")
@@ -263,8 +265,8 @@
def getslice(self, w_list, start, stop, step, length):
#storage = self.unerase(w_list.lstorage)
- raise OperationError(w_list.space.w_NotImplementedError, w_list.space.wrap(
- "settting a slice of a PySequence_Fast is not supported"))
+ raise oefmt(w_list.space.w_NotImplementedError,
+ "settting a slice of a PySequence_Fast is not supported")
def getitems(self, w_list):
# called when switching list strategy, so convert storage
diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py
--- a/pypy/module/cpyext/setobject.py
+++ b/pypy/module/cpyext/setobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL,
build_type_checkers)
@@ -85,8 +85,7 @@
len(anyset). Raises a PyExc_SystemError if anyset is not a set, frozenset,
or an instance of a subtype."""
if not PySet_Check(space, ref):
- raise OperationError(space.w_TypeError,
- space.wrap("expected set object"))
+ raise oefmt(space.w_TypeError, "expected set object")
return PySet_GET_SIZE(space, ref)
@cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -35,8 +35,8 @@
def check_num_args(space, w_ob, n):
from pypy.module.cpyext.tupleobject import PyTuple_CheckExact
if not PyTuple_CheckExact(space, w_ob):
- raise OperationError(space.w_SystemError,
- space.wrap("PyArg_UnpackTuple() argument list is not a tuple"))
+ raise oefmt(space.w_SystemError,
+ "PyArg_UnpackTuple() argument list is not a tuple")
if n == space.len_w(w_ob):
return
raise oefmt(space.w_TypeError,
@@ -46,8 +46,8 @@
def check_num_argsv(space, w_ob, low, high):
from pypy.module.cpyext.tupleobject import PyTuple_CheckExact
if not PyTuple_CheckExact(space, w_ob):
- raise OperationError(space.w_SystemError,
- space.wrap("PyArg_UnpackTuple() argument list is not a tuple"))
+ raise oefmt(space.w_SystemError,
+ "PyArg_UnpackTuple() argument list is not a tuple")
if low <=space.len_w(w_ob) <= high:
return
raise oefmt(space.w_TypeError,
@@ -183,9 +183,7 @@
if w_type is space.w_None:
w_type = None
if w_obj is None and w_type is None:
- raise OperationError(
- space.w_TypeError,
- space.wrap("__get__(None, None) is invalid"))
+ raise oefmt(space.w_TypeError, "__get__(None, None) is invalid")
return generic_cpy_call(space, func_target, w_self, w_obj, w_type)
def wrap_descr_set(space, w_self, w_args, func):
diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py
--- a/pypy/module/cpyext/state.py
+++ b/pypy/module/cpyext/state.py
@@ -52,8 +52,9 @@
self.clear_exception()
raise operror
if always:
- raise OperationError(self.space.w_SystemError, self.space.wrap(
- "Function returned an error result without setting an exception"))
+ raise oefmt(self.space.w_SystemError,
+ "Function returned an error result without setting an "
+ "exception")
def build_api(self, space):
"""NOT_RPYTHON
diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py
--- a/pypy/module/cpyext/structmember.py
+++ b/pypy/module/cpyext/structmember.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.structmemberdefs import *
@@ -80,8 +80,7 @@
w_name = space.wrap(rffi.charp2str(w_member.c_name))
raise OperationError(space.w_AttributeError, w_name)
else:
- raise OperationError(space.w_SystemError,
- space.wrap("bad memberdescr type"))
+ raise oefmt(space.w_SystemError, "bad memberdescr type")
return w_result
@@ -95,16 +94,15 @@
if (flags & READONLY or
member_type in [T_STRING, T_STRING_INPLACE]):
- raise OperationError(space.w_TypeError,
- space.wrap("readonly attribute"))
+ raise oefmt(space.w_TypeError, "readonly attribute")
elif w_value is None:
if member_type == T_OBJECT_EX:
if not rffi.cast(PyObjectP, addr)[0]:
w_name = space.wrap(rffi.charp2str(w_member.c_name))
raise OperationError(space.w_AttributeError, w_name)
elif member_type != T_OBJECT:
- raise OperationError(space.w_TypeError,
- space.wrap("can't delete numeric/char attribute"))
+ raise oefmt(space.w_TypeError,
+ "can't delete numeric/char attribute")
for converter in integer_converters:
typ, lltyp, getter = converter
@@ -117,8 +115,7 @@
if member_type == T_CHAR:
str_value = space.str_w(w_value)
if len(str_value) != 1:
- raise OperationError(space.w_TypeError,
- space.wrap("string of length 1 expected"))
+ raise oefmt(space.w_TypeError, "string of length 1 expected")
array = rffi.cast(rffi.CCHARP, addr)
array[0] = str_value[0]
elif member_type in [T_OBJECT, T_OBJECT_EX]:
@@ -127,6 +124,5 @@
Py_DecRef(space, array[0])
array[0] = make_ref(space, w_value)
else:
- raise OperationError(space.w_SystemError,
- space.wrap("bad memberdescr type"))
+ raise oefmt(space.w_SystemError, "bad memberdescr type")
return 0
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.debug import fatalerror_notb
from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL,
@@ -142,8 +142,7 @@
ref = rffi.cast(PyTupleObject, ref)
size = ref.c_ob_size
if index < 0 or index >= size:
- raise OperationError(space.w_IndexError,
- space.wrap("tuple assignment index out of range"))
+ raise oefmt(space.w_IndexError, "tuple assignment index out of range")
old_ref = ref.c_ob_item[index]
ref.c_ob_item[index] = py_obj # consumes a reference
if old_ref:
@@ -158,8 +157,7 @@
ref = rffi.cast(PyTupleObject, ref)
size = ref.c_ob_size
if index < 0 or index >= size:
- raise OperationError(space.w_IndexError,
- space.wrap("tuple index out of range"))
+ raise oefmt(space.w_IndexError, "tuple index out of range")
return ref.c_ob_item[index] # borrowed ref
@cpython_api([PyObject], Py_ssize_t, error=-1)
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -7,7 +7,7 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.baseobjspace import W_Root, DescrMismatch
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.typedef import (GetSetProperty, TypeDef,
interp_attrproperty, interp_attrproperty, interp2app)
from pypy.module.__builtin__.abstractinst import abstract_issubclass_w
@@ -448,8 +448,8 @@
def str_getreadbuffer(space, w_str, segment, ref):
from pypy.module.cpyext.bytesobject import PyString_AsString
if segment != 0:
- raise OperationError(space.w_SystemError, space.wrap
- ("accessing non-existent string segment"))
+ raise oefmt(space.w_SystemError,
+ "accessing non-existent string segment")
pyref = make_ref(space, w_str)
ref[0] = PyString_AsString(space, pyref)
# Stolen reference: the object has better exist somewhere else
@@ -461,8 +461,8 @@
def str_getcharbuffer(space, w_str, segment, ref):
from pypy.module.cpyext.bytesobject import PyString_AsString
if segment != 0:
- raise OperationError(space.w_SystemError, space.wrap
- ("accessing non-existent string segment"))
+ raise oefmt(space.w_SystemError,
+ "accessing non-existent string segment")
pyref = make_ref(space, w_str)
ref[0] = PyString_AsString(space, pyref)
# Stolen reference: the object has better exist somewhere else
@@ -474,8 +474,8 @@
def buf_getreadbuffer(space, pyref, segment, ref):
from pypy.module.cpyext.bufferobject import PyBufferObject
if segment != 0:
- raise OperationError(space.w_SystemError, space.wrap
- ("accessing non-existent string segment"))
+ raise oefmt(space.w_SystemError,
+ "accessing non-existent string segment")
py_buf = rffi.cast(PyBufferObject, pyref)
ref[0] = py_buf.c_b_ptr
#Py_DecRef(space, pyref)
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.unicodedata import unicodedb
from pypy.module.cpyext.api import (
@@ -226,8 +226,7 @@
# Don't use PyUnicode_Check, it will realize the object :-(
w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type))
if not space.is_true(space.issubtype(w_type, space.w_unicode)):
- raise OperationError(space.w_TypeError,
- space.wrap("expected unicode object"))
+ raise oefmt(space.w_TypeError, "expected unicode object")
return PyUnicode_AS_UNICODE(space, ref)
@cpython_api([PyObject], Py_ssize_t, error=-1)
@@ -314,8 +313,8 @@
codec."""
w_str = PyUnicode_AsEncodedObject(space, w_unicode, llencoding, llerrors)
if not PyString_Check(space, w_str):
- raise OperationError(space.w_TypeError, space.wrap(
- "encoder did not return a string object"))
+ raise oefmt(space.w_TypeError,
+ "encoder did not return a string object")
return w_str
@cpython_api([PyObject], PyObject)
@@ -400,8 +399,7 @@
All other objects, including Unicode objects, cause a TypeError to be
set."""
if not encoding:
- raise OperationError(space.w_TypeError,
- space.wrap("decoding Unicode is not supported"))
+ raise oefmt(space.w_TypeError, "decoding Unicode is not supported")
w_encoding = space.wrap(rffi.charp2str(encoding))
if errors:
w_errors = space.wrap(rffi.charp2str(errors))
@@ -420,8 +418,7 @@
raise
w_meth = None
if w_meth is None:
- raise OperationError(space.w_TypeError,
- space.wrap("decoding Unicode is not supported"))
+ raise oefmt(space.w_TypeError, "decoding Unicode is not supported")
return space.call_function(w_meth, w_encoding, w_errors)
@cpython_api([CONST_STRING], PyObject)
@@ -459,8 +456,8 @@
# XXX always create a new string so far
py_uni = rffi.cast(PyUnicodeObject, ref[0])
if not py_uni.c_str:
- raise OperationError(space.w_SystemError, space.wrap(
- "PyUnicode_Resize called on already created string"))
+ raise oefmt(space.w_SystemError,
+ "PyUnicode_Resize called on already created string")
try:
py_newuni = new_empty_unicode(space, newsize)
except MemoryError:
diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py
--- a/pypy/module/exceptions/interp_exceptions.py
+++ b/pypy/module/exceptions/interp_exceptions.py
@@ -76,7 +76,7 @@
from pypy.interpreter.typedef import (TypeDef, GetSetProperty, descr_get_dict,
descr_set_dict, descr_del_dict)
from pypy.interpreter.gateway import interp2app
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib import rwin32
@@ -157,7 +157,8 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError, space.wrap("setting exceptions's dictionary to a non-dict"))
+ raise oefmt(space.w_TypeError,
+ "setting exceptions's dictionary to a non-dict")
self.w_dict = w_dict
def descr_reduce(self, space):
@@ -177,8 +178,7 @@
if w_msg is not None:
return w_msg
if self.w_message is None:
- raise OperationError(space.w_AttributeError,
- space.wrap("message was deleted"))
+ raise oefmt(space.w_AttributeError, "message was deleted")
msg = "BaseException.message has been deprecated as of Python 2.6"
space.warn(space.wrap(msg), space.w_DeprecationWarning)
return self.w_message
diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py
--- a/pypy/module/fcntl/interp_fcntl.py
+++ b/pypy/module/fcntl/interp_fcntl.py
@@ -1,6 +1,6 @@
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError, wrap_oserror, oefmt
+from pypy.interpreter.error import OperationError, oefmt, wrap_oserror
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from rpython.rlib import rposix
from rpython.translator.tool.cbuild import ExternalCompilationInfo
@@ -174,8 +174,7 @@
elif op & LOCK_EX:
l_type = F_WRLCK
else:
- raise OperationError(space.w_ValueError,
- space.wrap("unrecognized lock operation"))
+ raise oefmt(space.w_ValueError, "unrecognized lock operation")
op = [F_SETLKW, F_SETLK][int(bool(op & LOCK_NB))]
op = rffi.cast(rffi.INT, op) # C long => C int
@@ -230,9 +229,9 @@
lltype.free(ll_arg, flavor='raw')
if mutate_flag != -1:
- raise OperationError(space.w_TypeError, space.wrap(
- "ioctl requires a file or file descriptor, an integer "
- "and optionally an integer or buffer argument"))
+ raise oefmt(space.w_TypeError,
+ "ioctl requires a file or file descriptor, an integer and "
+ "optionally an integer or buffer argument")
try:
arg = space.getarg_w('s#', w_arg)
diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py
--- a/pypy/module/gc/interp_gc.py
+++ b/pypy/module/gc/interp_gc.py
@@ -1,5 +1,5 @@
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rlib import rgc
@@ -39,8 +39,7 @@
def enable_finalizers(space):
if space.user_del_action.finalizers_lock_count == 0:
- raise OperationError(space.w_ValueError,
- space.wrap("finalizers are already enabled"))
+ raise oefmt(space.w_ValueError, "finalizers are already enabled")
space.user_del_action.finalizers_lock_count -= 1
space.user_del_action.fire()
@@ -53,8 +52,7 @@
def dump_heap_stats(space, filename):
tb = rgc._heap_stats()
if not tb:
- raise OperationError(space.w_RuntimeError,
- space.wrap("Wrong GC"))
+ raise oefmt(space.w_RuntimeError, "Wrong GC")
f = open(filename, mode="w")
for i in range(len(tb)):
f.write("%d %d " % (tb[i].count, tb[i].size))
diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py
--- a/pypy/module/gc/referents.py
+++ b/pypy/module/gc/referents.py
@@ -2,7 +2,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import wrap_oserror, OperationError
+from pypy.interpreter.error import oefmt, wrap_oserror
from rpython.rlib.objectmodel import we_are_translated
@@ -41,8 +41,8 @@
return gcref
def missing_operation(space):
- return OperationError(space.w_NotImplementedError,
- space.wrap("operation not implemented by this GC"))
+ return oefmt(space.w_NotImplementedError,
+ "operation not implemented by this GC")
# ____________________________________________________________
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -156,8 +156,7 @@
except OperationError, e:
if not e.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_ValueError, space.wrap(
- "__package__ set to non-string"))
+ raise oefmt(space.w_ValueError, "__package__ set to non-string")
if ctxt_package is not None:
# __package__ is set, so use it
@@ -167,10 +166,11 @@
dot_position = _get_dot_position(ctxt_package, level - 1)
if dot_position < 0:
if len(ctxt_package) == 0:
- msg = "Attempted relative import in non-package"
+ where = "in non-package"
else:
- msg = "Attempted relative import beyond toplevel package"
- raise OperationError(space.w_ValueError, w(msg))
+ where = "beyond toplevel package"
+ raise oefmt(space.w_ValueError,
+ "Attempted relative import %s", where)
# Try to import parent package
try:
@@ -179,9 +179,9 @@
if not e.match(space, space.w_ImportError):
raise
if level > 0:
- raise OperationError(space.w_SystemError, space.wrap(
- "Parent module '%s' not loaded, "
- "cannot perform relative import" % ctxt_package))
+ raise oefmt(space.w_SystemError,
+ "Parent module '%s' not loaded, cannot perform "
+ "relative import", ctxt_package)
else:
msg = ("Parent module '%s' not found while handling absolute "
"import" % ctxt_package)
@@ -214,8 +214,8 @@
dot_position = _get_dot_position(ctxt_name, m)
if dot_position < 0:
if level > 0:
- msg = "Attempted relative import in non-package"
- raise OperationError(space.w_ValueError, w(msg))
+ raise oefmt(space.w_ValueError,
+ "Attempted relative import in non-package")
rel_modulename = ''
rel_level = 0
else:
@@ -248,9 +248,7 @@
w_locals=None, w_fromlist=None, level=-1):
modulename = name
if not modulename and level < 0:
- raise OperationError(
- space.w_ValueError,
- space.wrap("Empty module name"))
+ raise oefmt(space.w_ValueError, "Empty module name")
w = space.wrap
if w_fromlist is not None and space.is_true(w_fromlist):
@@ -364,8 +362,8 @@
w = space.wrap
if '/' in modulename or '\\' in modulename:
- raise OperationError(space.w_ImportError, space.wrap(
- "Import by filename is not supported."))
+ raise oefmt(space.w_ImportError,
+ "Import by filename is not supported.")
w_mod = None
parts = modulename.split('.')
@@ -461,8 +459,7 @@
@unwrap_spec(path='str0')
def descr_init(self, space, path):
if not path:
- raise OperationError(space.w_ImportError, space.wrap(
- "empty pathname"))
+ raise oefmt(space.w_ImportError, "empty pathname")
# Directory should not exist
try:
@@ -471,8 +468,7 @@
pass
else:
if stat.S_ISDIR(st.st_mode):
- raise OperationError(space.w_ImportError, space.wrap(
- "existing directory"))
+ raise oefmt(space.w_ImportError, "existing directory")
def find_module_w(self, space, __args__):
return space.wrap(None)
@@ -700,9 +696,7 @@
"""Reload the module.
The module must have been successfully imported before."""
if not space.is_w(space.type(w_module), space.type(space.sys)):
- raise OperationError(
- space.w_TypeError,
- space.wrap("reload() argument must be module"))
+ raise oefmt(space.w_TypeError, "reload() argument must be module")
w_modulename = space.getattr(w_module, space.wrap("__name__"))
modulename = space.str0_w(w_modulename)
@@ -806,8 +800,7 @@
if self.lock is None: # CannotHaveLock occurred
return
space = self.space
- raise OperationError(space.w_RuntimeError,
- space.wrap("not holding the import lock"))
+ raise oefmt(space.w_RuntimeError, "not holding the import lock")
assert self.lockcounter > 0
self.lockcounter -= 1
if self.lockcounter == 0:
diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py
--- a/pypy/module/imp/interp_imp.py
+++ b/pypy/module/imp/interp_imp.py
@@ -2,7 +2,7 @@
from pypy.module._file.interp_file import W_File
from rpython.rlib import streamio
from rpython.rlib.streamio import StreamErrors
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from pypy.interpreter.module import Module
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.streamutil import wrap_streamerror
@@ -129,8 +129,7 @@
@unwrap_spec(filename=str)
def load_dynamic(space, w_modulename, filename, w_file=None):
if not importing.has_so_extension(space):
- raise OperationError(space.w_ImportError, space.wrap(
- "Not implemented"))
+ raise oefmt(space.w_ImportError, "Not implemented")
importing.load_c_extension(space, filename, space.str_w(w_modulename))
return importing.check_sys_modules(space, w_modulename)
@@ -142,9 +141,8 @@
if name not in space.builtin_modules:
return
if space.finditem(space.sys.get('modules'), w_name) is not None:
- raise OperationError(
- space.w_ImportError,
- space.wrap("cannot initialize a built-in module twice in PyPy"))
+ raise oefmt(space.w_ImportError,
+ "cannot initialize a built-in module twice in PyPy")
return space.getbuiltinmodule(name)
def init_frozen(space, w_name):
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
--- a/pypy/module/itertools/interp_itertools.py
+++ b/pypy/module/itertools/interp_itertools.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, make_weakref_descr
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from rpython.rlib import jit
@@ -46,8 +46,7 @@
def check_number(space, w_obj):
if (space.lookup(w_obj, '__int__') is None and
space.lookup(w_obj, '__float__') is None):
- raise OperationError(space.w_TypeError,
- space.wrap("expected a number"))
+ raise oefmt(space.w_TypeError, "expected a number")
@unwrap_spec(w_start=WrappedDefault(0), w_step=WrappedDefault(1))
def W_Count___new__(space, w_subtype, w_start, w_step):
@@ -346,7 +345,9 @@
"Indicies for islice() must be None or non-negative integers")
w_stop = args_w[0]
else:
- raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)"))
+ raise oefmt(space.w_TypeError,
+ "islice() takes at most 4 arguments (%d given)",
+ num_args)
if space.is_w(w_stop, space.w_None):
stop = -1
@@ -540,7 +541,9 @@
iterator_w = space.iter(iterable_w)
except OperationError, e:
if e.match(self.space, self.space.w_TypeError):
- raise OperationError(space.w_TypeError, space.wrap(self._error_name + " argument #" + str(i + 1) + " must support iteration"))
+ raise oefmt(space.w_TypeError,
+ "%s argument #%d must support iteration",
+ self._error_name, i + 1)
else:
raise
else:
@@ -577,8 +580,8 @@
def W_IMap___new__(space, w_subtype, w_fun, args_w):
if len(args_w) == 0:
- raise OperationError(space.w_TypeError,
- space.wrap("imap() must have at least two arguments"))
+ raise oefmt(space.w_TypeError,
+ "imap() must have at least two arguments")
r = space.allocate_instance(W_IMap, w_subtype)
r.__init__(space, w_fun, args_w)
return space.wrap(r)
@@ -690,8 +693,8 @@
w_fillvalue = kwds_w["fillvalue"]
del kwds_w["fillvalue"]
if kwds_w:
- raise OperationError(space.w_TypeError, space.wrap(
- "izip_longest() got unexpected keyword argument(s)"))
+ raise oefmt(space.w_TypeError,
+ "izip_longest() got unexpected keyword argument(s)")
self = space.allocate_instance(W_IZipLongest, w_subtype)
self.__init__(space, space.w_None, arguments_w)
@@ -847,7 +850,7 @@
return tuple([gen(it.next) for i in range(n)])
"""
if n < 0:
- raise OperationError(space.w_ValueError, space.wrap("n must be >= 0"))
+ raise oefmt(space.w_ValueError, "n must be >= 0")
if isinstance(w_iterable, W_TeeIterable): # optimization only
chained_list = w_iterable.chained_list
@@ -1167,8 +1170,8 @@
w_repeat = kwds_w['repeat']
del kwds_w['repeat']
if kwds_w:
- raise OperationError(space.w_TypeError, space.wrap(
- "product() got unexpected keyword argument(s)"))
+ raise oefmt(space.w_TypeError,
+ "product() got unexpected keyword argument(s)")
r = space.allocate_instance(W_Product, w_subtype)
r.__init__(space, arguments_w, w_repeat)
@@ -1270,9 +1273,7 @@
def W_Combinations__new__(space, w_subtype, w_iterable, r):
pool_w = space.fixedview(w_iterable)
if r < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("r must be non-negative")
- )
+ raise oefmt(space.w_ValueError, "r must be non-negative")
indices = range(len(pool_w))
res = space.allocate_instance(W_Combinations, w_subtype)
res.__init__(space, pool_w, indices, r)
@@ -1305,8 +1306,7 @@
def W_CombinationsWithReplacement__new__(space, w_subtype, w_iterable, r):
pool_w = space.fixedview(w_iterable)
if r < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("r must be non-negative"))
+ raise oefmt(space.w_ValueError, "r must be non-negative")
indices = [0] * r
res = space.allocate_instance(W_CombinationsWithReplacement, w_subtype)
res.__init__(space, pool_w, indices, r)
diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py
--- a/pypy/module/marshal/interp_marshal.py
+++ b/pypy/module/marshal/interp_marshal.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import WrappedDefault, unwrap_spec
from rpython.rlib.rarithmetic import intmask
from rpython.rlib import rstackovf
@@ -60,8 +60,7 @@
def raise_eof(self):
space = self.space
- raise OperationError(space.w_EOFError, space.wrap(
- 'EOF read where object expected'))
+ raise oefmt(space.w_EOFError, "EOF read where object expected")
def finished(self):
pass
@@ -81,8 +80,8 @@
except OperationError, e:
if not e.match(space, space.w_AttributeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- 'marshal.dump() 2nd arg must be file-like object'))
+ raise oefmt(space.w_TypeError,
+ "marshal.dump() 2nd arg must be file-like object")
def write(self, data):
space = self.space
@@ -98,8 +97,8 @@
except OperationError, e:
if not e.match(space, space.w_AttributeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- 'marshal.load() arg must be file-like object'))
+ raise oefmt(space.w_TypeError,
+ "marshal.load() arg must be file-like object")
def read(self, n):
space = self.space
@@ -416,8 +415,7 @@
tc = self.get1()
w_ret = self._dispatch[ord(tc)](space, self, tc)
if w_ret is None and not allow_null:
- raise OperationError(space.w_TypeError, space.wrap(
- 'NULL object in marshal data'))
+ raise oefmt(space.w_TypeError, "NULL object in marshal data")
return w_ret
def load_w_obj(self):
@@ -442,8 +440,7 @@
res_w[idx] = w_ret
idx += 1
if w_ret is None:
- raise OperationError(space.w_TypeError, space.wrap(
- 'NULL object in marshal data'))
+ raise oefmt(space.w_TypeError, "NULL object in marshal data")
return res_w
def get_list_w(self):
@@ -463,8 +460,7 @@
def raise_eof(self):
space = self.space
- raise OperationError(space.w_EOFError, space.wrap(
- 'EOF read where object expected'))
+ raise oefmt(space.w_EOFError, "EOF read where object expected")
def get(self, n):
pos = self.bufpos
diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py
--- a/pypy/module/math/interp_math.py
+++ b/pypy/module/math/interp_math.py
@@ -2,7 +2,7 @@
import sys
from rpython.rlib import rfloat
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
class State:
def __init__(self, space):
@@ -22,11 +22,9 @@
try:
From pypy.commits at gmail.com Mon May 2 02:55:17 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 23:55:17 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: fix
Message-ID: <5726f9d5.8a37c20a.31ded.4e97@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84123:c665430f23c3
Date: 2016-05-01 23:54 -0700
http://bitbucket.org/pypy/pypy/changeset/c665430f23c3/
Log: fix
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -85,7 +85,7 @@
if length == 0:
return generic_cpy_call(space, func, w_self, None)
raise oefmt(space.w_TypeError,
- "() takes no arguments", self.name)
+ "%s() takes no arguments", self.name)
elif flags & METH_O:
if length != 1:
raise oefmt(space.w_TypeError,
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py
--- a/pypy/module/cpyext/test/test_sequence.py
+++ b/pypy/module/cpyext/test/test_sequence.py
@@ -163,7 +163,7 @@
assert space.int_w(space.getitem(w_l, space.wrap(1))) == 2
assert space.int_w(space.getitem(w_l, space.wrap(0))) == 1
e = py.test.raises(OperationError, space.getitem, w_l, space.wrap(15))
- assert "list index out of range" in e.exconly()
+ assert "list index out of range" in e.value.errorstr(space)
assert space.int_w(space.getitem(w_l, space.wrap(-1))) == 4
space.setitem(w_l, space.wrap(1), space.wrap(13))
assert space.int_w(space.getitem(w_l, space.wrap(1))) == 13
From pypy.commits at gmail.com Mon May 2 02:55:15 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 01 May 2016 23:55:15 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: missing import
Message-ID: <5726f9d3.e7bec20a.de053.4929@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84122:21f31b2d5357
Date: 2016-05-01 23:54 -0700
http://bitbucket.org/pypy/pypy/changeset/21f31b2d5357/
Log: missing import
diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py
--- a/pypy/module/cpyext/state.py
+++ b/pypy/module/cpyext/state.py
@@ -1,6 +1,6 @@
from rpython.rlib.objectmodel import we_are_translated
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.executioncontext import AsyncAction
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.annlowlevel import llhelper
From pypy.commits at gmail.com Mon May 2 03:05:55 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 00:05:55 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: missing imports
Message-ID: <5726fc53.c42e1c0a.ad2b6.62b9@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84124:cb8a5b37e917
Date: 2016-05-02 00:05 -0700
http://bitbucket.org/pypy/pypy/changeset/cb8a5b37e917/
Log: missing imports
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import oefmt
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from pypy.interpreter.argument import Arguments
From pypy.commits at gmail.com Mon May 2 03:40:53 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 00:40:53 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: fix
Message-ID: <57270485.4412c30a.fec01.4e8d@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84125:f3c78c2276bd
Date: 2016-05-02 00:39 -0700
http://bitbucket.org/pypy/pypy/changeset/f3c78c2276bd/
Log: fix
diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py
--- a/pypy/module/cpyext/pystrtod.py
+++ b/pypy/module/cpyext/pystrtod.py
@@ -64,7 +64,7 @@
rffi.cast(rffi.LONG, s))
if endpos == 0 or (not user_endptr and not endptr[0][0] == '\0'):
raise oefmt(space.w_ValueError,
- "invalid input at position %s", endpos)
+ "invalid input at position %d", endpos)
err = rffi.cast(lltype.Signed, rposix._get_errno())
if err == errno.ERANGE:
rposix._set_errno(rffi.cast(rffi.INT, 0))
From pypy.commits at gmail.com Mon May 2 11:03:04 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 08:03:04 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: update the docs to mention oefmt()
instead of OperationError()
Message-ID: <57276c28.a1ccc20a.fd2a9.fffffe15@mx.google.com>
Author: Armin Rigo
Branch: oefmt
Changeset: r84126:81bcc496d12e
Date: 2016-05-02 17:02 +0200
http://bitbucket.org/pypy/pypy/changeset/81bcc496d12e/
Log: update the docs to mention oefmt() instead of OperationError()
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
From pypy.commits at gmail.com Mon May 2 11:53:48 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 08:53:48 -0700 (PDT)
Subject: [pypy-commit] pypy default: Use modern syntax to reduce diff with
py3k branch
Message-ID: <5727780c.e873c20a.f4bb.2642@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84127:2135a4bc384f
Date: 2016-05-02 16:51 +0100
http://bitbucket.org/pypy/pypy/changeset/2135a4bc384f/
Log: Use modern syntax to reduce diff with py3k branch
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -143,7 +143,6 @@
e = E()
D.__bases__ = (C,)
D.__bases__ = (C2,)
- #import pdb; pdb.set_trace()
assert d.meth() == 1
assert e.meth() == 1
assert d.a == 2
@@ -184,7 +183,7 @@
try:
D.__bases__ = ()
- except TypeError, msg:
+ except TypeError as msg:
if str(msg) == "a new-style class can't have only classic bases":
assert 0, "wrong error message for .__bases__ = ()"
else:
@@ -309,7 +308,7 @@
except TypeError:
pass
else:
- raise TestFailed, "didn't catch MRO conflict"
+ raise TestFailed("didn't catch MRO conflict")
def test_mutable_bases_versus_nonheap_types(self):
class A(int):
@@ -442,7 +441,7 @@
except TypeError:
pass
else:
- raise AssertionError, "this multiple inheritance should fail"
+ raise AssertionError("this multiple inheritance should fail")
def test_outer_metaclass(self):
class OuterMetaClass(type):
@@ -512,7 +511,7 @@
try:
assert NoDoc.__doc__ == None
except AttributeError:
- raise AssertionError, "__doc__ missing!"
+ raise AssertionError("__doc__ missing!")
def test_explicitdoc(self):
class ExplicitDoc(object):
@@ -539,7 +538,7 @@
# we always raise AttributeError.
pass
else:
- raise AssertionError, '__doc__ should not be writable'
+ raise AssertionError('__doc__ should not be writable')
assert ImmutableDoc.__doc__ == 'foo'
@@ -1048,14 +1047,14 @@
try:
class E(B, A): # "best base" is B
__slots__ = ("__dict__",)
- except TypeError, e:
+ except TypeError as e:
assert 'we already got one' in str(e)
else:
raise AssertionError("TypeError not raised")
try:
class F(B, A): # "best base" is B
__slots__ = ("__weakref__",)
- except TypeError, e:
+ except TypeError as e:
assert 'we already got one' in str(e)
else:
raise AssertionError("TypeError not raised")
From pypy.commits at gmail.com Mon May 2 12:03:21 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 09:03:21 -0700 (PDT)
Subject: [pypy-commit] pypy default: Fix for the (probably never-occurring)
case of a malloc of a fixed-size
Message-ID: <57277a49.8bd31c0a.50429.4c12@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84128:e1a3497e6ab1
Date: 2016-05-02 18:03 +0200
http://bitbucket.org/pypy/pypy/changeset/e1a3497e6ab1/
Log: Fix for the (probably never-occurring) case of a malloc of a fixed-
size but very big object with a lightweight destructor
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -639,13 +639,14 @@
# Build the object.
llarena.arena_reserve(result, totalsize)
obj = result + size_gc_header
- if is_finalizer_light:
- self.young_objects_with_light_finalizers.append(obj)
self.init_gc_object(result, typeid, flags=0)
- #
- # If it is a weakref, record it (check constant-folded).
- if contains_weakptr:
- self.young_objects_with_weakrefs.append(obj)
+ #
+ # If it is a weakref or has a lightweight finalizer, record it
+ # (checks constant-folded).
+ if is_finalizer_light:
+ self.young_objects_with_light_finalizers.append(obj)
+ if contains_weakptr:
+ self.young_objects_with_weakrefs.append(obj)
#
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
From pypy.commits at gmail.com Mon May 2 12:57:33 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 09:57:33 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: in-progress
Message-ID: <572786fd.d5da1c0a.7a53a.51d2@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84129:7edef1bf570b
Date: 2016-05-02 18:57 +0200
http://bitbucket.org/pypy/pypy/changeset/7edef1bf570b/
Log: in-progress
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -90,10 +90,10 @@
To find the queued items, call ``fin.next_dead()`` repeatedly. It
returns the next queued item, or ``None`` when the queue is empty.
-It is not allowed to cumulate several ``FinalizerQueue`` instances for
-objects of the same class. Calling ``fin.register_finalizer(obj)``
-several times with the same arguments is fine (and will only register
-``obj`` once).
+It is allowed in theory to cumulate several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues. This is not tested though.
Ordering of finalizers
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -60,8 +60,7 @@
def set_query_functions(self, is_varsize, has_gcptr_in_varsize,
is_gcarrayofgcptr,
- getfinalizer,
- getlightfinalizer,
+ destructor_or_custom_trace,
offsets_to_gc_pointers,
fixed_size, varsize_item_sizes,
varsize_offset_to_variable_part,
@@ -74,8 +73,7 @@
fast_path_tracing,
has_gcptr,
cannot_pin):
- self.getfinalizer = getfinalizer
- self.getlightfinalizer = getlightfinalizer
+ self.destructor_or_custom_trace = destructor_or_custom_trace
self.is_varsize = is_varsize
self.has_gcptr_in_varsize = has_gcptr_in_varsize
self.is_gcarrayofgcptr = is_gcarrayofgcptr
@@ -136,13 +134,13 @@
the four malloc_[fixed,var]size[_clear]() functions.
"""
size = self.fixed_size(typeid)
- needs_finalizer = bool(self.getfinalizer(typeid))
- finalizer_is_light = bool(self.getlightfinalizer(typeid))
+ needs_destructor = (bool(self.destructor_or_custom_trace(typeid))
+ and not self.has_custom_trace(typeid))
contains_weakptr = self.weakpointer_offset(typeid) >= 0
- assert not (needs_finalizer and contains_weakptr)
+ assert not (needs_destructor and contains_weakptr)
if self.is_varsize(typeid):
assert not contains_weakptr
- assert not needs_finalizer
+ assert not needs_destructor
itemsize = self.varsize_item_sizes(typeid)
offset_to_length = self.varsize_offset_to_length(typeid)
if self.malloc_zero_filled:
@@ -157,8 +155,7 @@
malloc_fixedsize = self.malloc_fixedsize_clear
else:
malloc_fixedsize = self.malloc_fixedsize
- ref = malloc_fixedsize(typeid, size, needs_finalizer,
- finalizer_is_light,
+ ref = malloc_fixedsize(typeid, size, needs_destructor,
contains_weakptr)
# lots of cast and reverse-cast around...
ref = llmemory.cast_ptr_to_adr(ref)
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -372,10 +372,19 @@
self.gc_state = STATE_SCANNING
#
- # A list of all objects with finalizers (these are never young).
- self.objects_with_finalizers = self.AddressDeque()
- self.young_objects_with_light_finalizers = self.AddressStack()
- self.old_objects_with_light_finalizers = self.AddressStack()
+ # Two lists of all objects with finalizers. Actually they are lists
+ # of pairs (finalization_queue_nr, object). "probably young objects"
+ # are all traced and moved to the "old" list by the next minor
+ # collection.
+ self.probably_young_objects_with_finalizers = self.AddressDeque()
+ self.old_objects_with_finalizers = self.AddressDeque()
+ p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw',
+ track_allocation=False)
+ self.singleaddr = llmemory.cast_ptr_to_adr(p)
+ #
+ # Two lists of all objects with destructors.
+ self.young_objects_with_destructors = self.AddressStack()
+ self.old_objects_with_destructors = self.AddressStack()
#
# Two lists of the objects with weakrefs. No weakref can be an
# old object weakly pointing to a young object: indeed, weakrefs
@@ -599,25 +608,16 @@
def malloc_fixedsize(self, typeid, size,
- needs_finalizer=False,
- is_finalizer_light=False,
+ needs_destructor=False,
contains_weakptr=False):
size_gc_header = self.gcheaderbuilder.size_gc_header
totalsize = size_gc_header + size
rawtotalsize = raw_malloc_usage(totalsize)
#
- # If the object needs a finalizer, ask for a rawmalloc.
- # The following check should be constant-folded.
- if needs_finalizer and not is_finalizer_light:
- ll_assert(not contains_weakptr,
- "'needs_finalizer' and 'contains_weakptr' both specified")
- obj = self.external_malloc(typeid, 0, alloc_young=False)
- self.objects_with_finalizers.append(obj)
- #
# If totalsize is greater than nonlarge_max (which should never be
# the case in practice), ask for a rawmalloc. The following check
# should be constant-folded.
- elif rawtotalsize > self.nonlarge_max:
+ if rawtotalsize > self.nonlarge_max:
ll_assert(not contains_weakptr,
"'contains_weakptr' specified for a large object")
obj = self.external_malloc(typeid, 0, alloc_young=True)
@@ -639,14 +639,14 @@
# Build the object.
llarena.arena_reserve(result, totalsize)
obj = result + size_gc_header
- if is_finalizer_light:
- self.young_objects_with_light_finalizers.append(obj)
self.init_gc_object(result, typeid, flags=0)
- #
- # If it is a weakref, record it (check constant-folded).
- if contains_weakptr:
- self.young_objects_with_weakrefs.append(obj)
#
+ # If it is a weakref or has a lightweight destructor, record it
+ # (checks constant-folded).
+ if needs_destructor:
+ self.young_objects_with_destructors.append(obj)
+ if contains_weakptr:
+ self.young_objects_with_weakrefs.append(obj)
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
@@ -1632,6 +1632,11 @@
if self.rrc_enabled:
self.rrc_minor_collection_trace()
#
+ # visit the "probably young" objects with finalizers. They
+ # always all survive.
+ if self.probably_young_objects_with_finalizers.non_empty():
+ self.deal_with_young_objects_with_finalizers()
+ #
while True:
# If we are using card marking, do a partial trace of the arrays
# that are flagged with GCFLAG_CARDS_SET.
@@ -1657,8 +1662,8 @@
# weakrefs' targets.
if self.young_objects_with_weakrefs.non_empty():
self.invalidate_young_weakrefs()
- if self.young_objects_with_light_finalizers.non_empty():
- self.deal_with_young_objects_with_finalizers()
+ if self.young_objects_with_destructors.non_empty():
+ self.deal_with_young_objects_with_destructors()
#
# Clear this mapping. Without pinned objects we just clear the dict
# as all objects in the nursery are dragged out of the nursery and, if
@@ -2220,7 +2225,10 @@
if self.rrc_enabled:
self.rrc_major_collection_trace()
#
- if self.objects_with_finalizers.non_empty():
+ ll_assert(not (self.probably_young_objects_with_finalizers
+ .non_empty()),
+ "probably_young_objects_with_finalizers should be empty")
+ if self.old_objects_with_finalizers.non_empty():
self.deal_with_objects_with_finalizers()
elif self.old_objects_with_weakrefs.non_empty():
# Weakref support: clear the weak pointers to dying objects
@@ -2236,9 +2244,9 @@
self.more_objects_to_trace.delete()
#
- # Light finalizers
- if self.old_objects_with_light_finalizers.non_empty():
- self.deal_with_old_objects_with_finalizers()
+ # Destructors
+ if self.old_objects_with_destructors.non_empty():
+ self.deal_with_old_objects_with_destructors()
# objects_to_trace processed fully, can move on to sweeping
self.ac.mass_free_prepare()
self.start_free_rawmalloc_objects()
@@ -2572,10 +2580,9 @@
# ----------
# Finalizers
- def deal_with_young_objects_with_finalizers(self):
- """ This is a much simpler version of dealing with finalizers
- and an optimization - we can reasonably assume that those finalizers
- don't do anything fancy and *just* call them. Among other things
+ def deal_with_young_objects_with_destructors(self):
+ """We can reasonably assume that destructors don't do
+ anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
while self.young_objects_with_light_finalizers.non_empty():
@@ -2588,10 +2595,9 @@
obj = self.get_forwarding_address(obj)
self.old_objects_with_light_finalizers.append(obj)
- def deal_with_old_objects_with_finalizers(self):
- """ This is a much simpler version of dealing with finalizers
- and an optimization - we can reasonably assume that those finalizers
- don't do anything fancy and *just* call them. Among other things
+ def deal_with_old_objects_with_destructors(self):
+ """We can reasonably assume that destructors don't do
+ anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
new_objects = self.AddressStack()
@@ -2608,6 +2614,16 @@
self.old_objects_with_light_finalizers.delete()
self.old_objects_with_light_finalizers = new_objects
+ def deal_with_young_objects_with_finalizers(self):
+ while self.probably_young_objects_with_finalizers.non_empty():
+ obj = self.probably_young_objects_with_finalizers.popleft()
+ fin_nr = self.probably_young_objects_with_finalizers.popleft()
+ singleaddr.address[0] = obj
+ self._trace_drag_out1(singleaddr)
+ obj = singleaddr.address[0]
+ self.old_objects_with_light_finalizers.append(obj)
+ self.old_objects_with_light_finalizers.append(fin_nr)
+
def deal_with_objects_with_finalizers(self):
# Walk over list of objects with finalizers.
# If it is not surviving, add it to the list of to-be-called
@@ -2814,9 +2830,6 @@
self.rrc_o_list_old = self.AddressStack()
self.rrc_p_dict = self.AddressDict() # non-nursery keys only
self.rrc_p_dict_nurs = self.AddressDict() # nursery keys only
- p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw',
- track_allocation=False)
- self.rrc_singleaddr = llmemory.cast_ptr_to_adr(p)
self.rrc_dealloc_trigger_callback = dealloc_trigger_callback
self.rrc_dealloc_pending = self.AddressStack()
self.rrc_enabled = True
@@ -2886,7 +2899,7 @@
self.rrc_p_dict_nurs.delete()
self.rrc_p_dict_nurs = self.AddressDict(length_estimate)
self.rrc_p_list_young.foreach(self._rrc_minor_trace,
- self.rrc_singleaddr)
+ self.singleaddr)
def _rrc_minor_trace(self, pyobject, singleaddr):
from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY
@@ -2899,7 +2912,7 @@
# force the corresponding object to be alive
intobj = self._pyobj(pyobject).ob_pypy_link
singleaddr.address[0] = llmemory.cast_int_to_adr(intobj)
- self._trace_drag_out(singleaddr, llmemory.NULL)
+ self._trace_drag_out1(singleaddr)
def rrc_minor_collection_free(self):
ll_assert(self.rrc_p_dict_nurs.length() == 0, "p_dict_nurs not empty 1")
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -1513,7 +1513,7 @@
self.translator = translator
super(TransformerLayoutBuilder, self).__init__(GCClass, lltype2vtable)
- def has_finalizer(self, TYPE):
+ def has_destructor(self, TYPE):
rtti = get_rtti(TYPE)
return rtti is not None and getattr(rtti._obj, 'destructor_funcptr',
None)
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -17,16 +17,17 @@
OFFSETS_TO_GC_PTR = lltype.Array(lltype.Signed)
- # A custom tracer (CT), enumerates the addresses that contain GCREFs.
- # It is called with the object as first argument, and the previous
- # returned address (or NULL the first time) as the second argument.
- FINALIZER_FUNC = lltype.FuncType([llmemory.Address], lltype.Void)
- FINALIZER = lltype.Ptr(FINALIZER_FUNC)
+ # A CUSTOM_FUNC is either a destructor, or a custom tracer.
+ # A destructor is called when the object is about to be freed.
+ # A custom tracer (CT) enumerates the addresses that contain GCREFs.
+ # Both are called with the address of the object as only argument.
+ CUSTOM_FUNC = lltype.FuncType([llmemory.Address], lltype.Void)
+ CUSTOM_FUNC_PTR = lltype.Ptr(CUSTOM_FUNC)
# structure describing the layout of a typeid
TYPE_INFO = lltype.Struct("type_info",
("infobits", lltype.Signed), # combination of the T_xxx consts
- ("finalizer", FINALIZER),
+ ("customfunc", CUSTOM_FUNC_PTR),
("fixedsize", lltype.Signed),
("ofstoptrs", lltype.Ptr(OFFSETS_TO_GC_PTR)),
hints={'immutable': True},
@@ -80,16 +81,10 @@
def q_cannot_pin(self, typeid):
typeinfo = self.get(typeid)
ANY = (T_HAS_GCPTR | T_IS_WEAKREF)
- return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.finalizer)
+ return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.customfunc)
- def q_finalizer(self, typeid):
- return self.get(typeid).finalizer
-
- def q_light_finalizer(self, typeid):
- typeinfo = self.get(typeid)
- if typeinfo.infobits & T_HAS_LIGHTWEIGHT_FINALIZER:
- return typeinfo.finalizer
- return lltype.nullptr(GCData.FINALIZER_FUNC)
+ def q_destructor_or_custom_trace(self, typeid):
+ return self.get(typeid).customfunc
def q_offsets_to_gc_pointers(self, typeid):
return self.get(typeid).ofstoptrs
@@ -141,8 +136,7 @@
self.q_is_varsize,
self.q_has_gcptr_in_varsize,
self.q_is_gcarrayofgcptr,
- self.q_finalizer,
- self.q_light_finalizer,
+ self.q_destructor_or_custom_trace,
self.q_offsets_to_gc_pointers,
self.q_fixed_size,
self.q_varsize_item_sizes,
@@ -170,9 +164,8 @@
T_IS_WEAKREF = 0x080000
T_IS_RPYTHON_INSTANCE = 0x100000 # the type is a subclass of OBJECT
T_HAS_CUSTOM_TRACE = 0x200000
-T_HAS_LIGHTWEIGHT_FINALIZER = 0x400000
-T_HAS_GCPTR = 0x1000000
-T_KEY_MASK = intmask(0xFE000000) # bug detection only
+T_HAS_GCPTR = 0x400000
+T_KEY_MASK = intmask(0xFF000000) # bug detection only
T_KEY_VALUE = intmask(0x5A000000) # bug detection only
def _check_valid_type_info(p):
@@ -199,11 +192,8 @@
#
fptrs = builder.special_funcptr_for_type(TYPE)
if fptrs:
- if "finalizer" in fptrs:
- info.finalizer = fptrs["finalizer"]
- if "light_finalizer" in fptrs:
- info.finalizer = fptrs["light_finalizer"]
- infobits |= T_HAS_LIGHTWEIGHT_FINALIZER
+ if "destructor" in fptrs:
+ info.customfunc = fptrs["destructor"]
#
if not TYPE._is_varsize():
info.fixedsize = llarena.round_up_for_allocation(
@@ -373,22 +363,19 @@
def special_funcptr_for_type(self, TYPE):
if TYPE in self._special_funcptrs:
return self._special_funcptrs[TYPE]
- fptr1, is_lightweight = self.make_finalizer_funcptr_for_type(TYPE)
+ fptr1 = self.make_destructor_funcptr_for_type(TYPE)
fptr2 = self.make_custom_trace_funcptr_for_type(TYPE)
result = {}
if fptr1:
- if is_lightweight:
- result["light_finalizer"] = fptr1
- else:
- result["finalizer"] = fptr1
+ result["destructor"] = fptr1
if fptr2:
result["custom_trace"] = fptr2
self._special_funcptrs[TYPE] = result
return result
- def make_finalizer_funcptr_for_type(self, TYPE):
+ def make_destructor_funcptr_for_type(self, TYPE):
# must be overridden for proper finalizer support
- return None, False
+ return None
def make_custom_trace_funcptr_for_type(self, TYPE):
# must be overridden for proper custom tracer support
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -228,7 +228,7 @@
self.llinterp = llinterp
super(DirectRunLayoutBuilder, self).__init__(GCClass, lltype2vtable)
- def make_finalizer_funcptr_for_type(self, TYPE):
+ def make_destructor_funcptr_for_type(self, TYPE):
from rpython.memory.gctransform.support import get_rtti
rtti = get_rtti(TYPE)
if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
@@ -236,18 +236,19 @@
DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
destrgraph = destrptr._obj.graph
else:
- return None, False
+ return None
t = self.llinterp.typer.annotator.translator
- light = not FinalizerAnalyzer(t).analyze_light_finalizer(destrgraph)
- def ll_finalizer(addr):
+ FinalizerAnalyzer(t).check_light_finalizer(destrgraph)
+
+ def ll_destructor(addr):
try:
v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
self.llinterp.eval_graph(destrgraph, [v], recursive=True)
except llinterp.LLException:
raise RuntimeError(
- "a finalizer raised an exception, shouldn't happen")
- return llhelper(gctypelayout.GCData.FINALIZER, ll_finalizer), light
+ "a destructor raised an exception, shouldn't happen")
+ return llhelper(gctypelayout.GCData.CUSTOM_FUNC_PTR, ll_destructor)
def make_custom_trace_funcptr_for_type(self, TYPE):
from rpython.memory.gctransform.support import get_rtti
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -369,25 +369,43 @@
class FinalizerQueue(object):
"""A finalizer queue. See pypy/doc/discussion/finalizer-order.rst.
+ Note: only works with the framework GCs (like minimark). It is
+ ignored with Boehm or with refcounting (used by tests).
"""
# Must be subclassed, and the subclass needs these attributes:
#
- # base_class:
- # the base class (or only class) of finalized objects
+ # Class:
+ # the class (or base class) of finalized objects
#
# def finalizer_trigger(self):
# called to notify that new items have been put in the queue
+ def _freeze_(self):
+ return True
+
+ @specialize.arg(0)
def next_dead(self):
- "NOT_RPYTHON: special-cased below"
+ if we_are_translated():
+ from rpython.rtyper.lltypesystem.lloperation import llop
+ from rpython.rtyper.rclass import OBJECTPTR
+ from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
+ ptr = llop.gc_fq_next_dead(OBJECTPTR, self)
+ return cast_base_ptr_to_instance(self.Class, ptr)
try:
return self._queue.popleft()
except (AttributeError, IndexError):
return None
+ @specialize.arg(0)
def register_finalizer(self, obj):
- "NOT_RPYTHON: special-cased below"
- assert isinstance(obj, self.base_class)
+ assert isinstance(obj, self.Class)
+ if we_are_translated():
+ from rpython.rtyper.lltypesystem.lloperation import llop
+ from rpython.rtyper.rclass import OBJECTPTR
+ from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr
+ ptr = cast_instance_to_base_ptr(obj)
+ llop.gc_fq_register(lltype.Void, self, ptr)
+ return
if hasattr(obj, '__enable_del_for_id'):
return # already called
diff --git a/rpython/translator/backendopt/finalizer.py b/rpython/translator/backendopt/finalizer.py
--- a/rpython/translator/backendopt/finalizer.py
+++ b/rpython/translator/backendopt/finalizer.py
@@ -3,8 +3,8 @@
from rpython.rtyper.lltypesystem import lltype
class FinalizerError(Exception):
- """ __del__ marked as lightweight finalizer, but the analyzer did
- not agree
+ """__del__() is used for lightweight RPython destructors,
+ but the FinalizerAnalyzer found that it is not lightweight.
"""
class FinalizerAnalyzer(graphanalyze.BoolGraphAnalyzer):
@@ -20,12 +20,10 @@
'direct_ptradd', 'force_cast', 'track_alloc_stop',
'raw_free', 'adr_eq', 'adr_ne']
- def analyze_light_finalizer(self, graph):
+ def check_light_finalizer(self, graph):
result = self.analyze_direct_call(graph)
- if (result is self.top_result() and
- getattr(graph.func, '_must_be_light_finalizer_', False)):
+ if result is self.top_result():
raise FinalizerError(FinalizerError.__doc__, graph)
- return result
def analyze_simple_operation(self, op, graphinfo):
if op.opname in self.ok_operations:
From pypy.commits at gmail.com Mon May 2 12:58:13 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 09:58:13 -0700 (PDT)
Subject: [pypy-commit] pypy default: Don't call '_trace_drag_out' directly,
we don't need it inlined yet
Message-ID: <57278725.4ca51c0a.2cbe3.55e6@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84130:45eb0969c1a6
Date: 2016-05-02 18:58 +0200
http://bitbucket.org/pypy/pypy/changeset/45eb0969c1a6/
Log: Don't call '_trace_drag_out' directly, we don't need it inlined yet
another time here
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2900,7 +2900,7 @@
# force the corresponding object to be alive
intobj = self._pyobj(pyobject).ob_pypy_link
singleaddr.address[0] = llmemory.cast_int_to_adr(intobj)
- self._trace_drag_out(singleaddr, llmemory.NULL)
+ self._trace_drag_out1(singleaddr)
def rrc_minor_collection_free(self):
ll_assert(self.rrc_p_dict_nurs.length() == 0, "p_dict_nurs not empty 1")
From pypy.commits at gmail.com Mon May 2 13:18:26 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 10:18:26 -0700 (PDT)
Subject: [pypy-commit] pypy default: rename variables for clarity (and to
match py3k)
Message-ID: <57278be2.55301c0a.0d92.5ed5@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84131:0aba0ed90c42
Date: 2016-05-02 18:17 +0100
http://bitbucket.org/pypy/pypy/changeset/0aba0ed90c42/
Log: rename variables for clarity (and to match py3k)
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -703,10 +703,10 @@
EMPTY = None, None
def next(self):
- if self.dictimplementation is None:
+ if self.w_dict is None:
return EMPTY
space = self.space
- if self.len != self.dictimplementation.length():
+ if self.len != self.w_dict.length():
self.len = -1 # Make this error state sticky
raise oefmt(space.w_RuntimeError,
"dictionary changed size during iteration")
@@ -715,7 +715,7 @@
if self.pos < self.len:
result = getattr(self, 'next_' + TP + '_entry')()
self.pos += 1
- if self.strategy is self.dictimplementation.get_strategy():
+ if self.strategy is self.w_dict.get_strategy():
return result # common case
else:
# waaa, obscure case: the strategy changed, but not the
@@ -725,28 +725,28 @@
if TP == 'key' or TP == 'value':
return result
w_key = result[0]
- w_value = self.dictimplementation.getitem(w_key)
+ w_value = self.w_dict.getitem(w_key)
if w_value is None:
self.len = -1 # Make this error state sticky
raise oefmt(space.w_RuntimeError,
"dictionary changed during iteration")
return (w_key, w_value)
# no more entries
- self.dictimplementation = None
+ self.w_dict = None
return EMPTY
return func_with_new_name(next, 'next_' + TP)
class BaseIteratorImplementation(object):
- def __init__(self, space, strategy, implementation):
+ def __init__(self, space, strategy, w_dict):
self.space = space
self.strategy = strategy
- self.dictimplementation = implementation
- self.len = implementation.length()
+ self.w_dict = w_dict
+ self.len = w_dict.length()
self.pos = 0
def length(self):
- if self.dictimplementation is not None and self.len != -1:
+ if self.w_dict is not None and self.len != -1:
return self.len - self.pos
return 0
@@ -781,9 +781,9 @@
'setitem_untyped_%s' % dictimpl.__name__)
class IterClassKeys(BaseKeyIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiterkeys(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiterkeys(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_key_entry(self):
for key in self.iterator:
@@ -792,9 +792,9 @@
return None
class IterClassValues(BaseValueIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getitervalues(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getitervalues(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_value_entry(self):
for value in self.iterator:
@@ -803,9 +803,9 @@
return None
class IterClassItems(BaseItemIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiteritems_with_hash(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiteritems_with_hash(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_item_entry(self):
for key, value, keyhash in self.iterator:
@@ -815,9 +815,9 @@
return None, None
class IterClassReversed(BaseKeyIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiterreversed(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiterreversed(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_key_entry(self):
for key in self.iterator:
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -833,15 +833,14 @@
obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
class MapDictIteratorKeys(BaseKeyIterator):
- def __init__(self, space, strategy, dictimplementation):
- BaseKeyIterator.__init__(self, space, strategy, dictimplementation)
- w_obj = strategy.unerase(dictimplementation.dstorage)
+ def __init__(self, space, strategy, w_dict):
+ BaseKeyIterator.__init__(self, space, strategy, w_dict)
+ w_obj = strategy.unerase(w_dict.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_key_entry(self):
- implementation = self.dictimplementation
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
@@ -855,15 +854,14 @@
class MapDictIteratorValues(BaseValueIterator):
- def __init__(self, space, strategy, dictimplementation):
- BaseValueIterator.__init__(self, space, strategy, dictimplementation)
- w_obj = strategy.unerase(dictimplementation.dstorage)
+ def __init__(self, space, strategy, w_dict):
+ BaseValueIterator.__init__(self, space, strategy, w_dict)
+ w_obj = strategy.unerase(w_dict.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_value_entry(self):
- implementation = self.dictimplementation
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
@@ -876,15 +874,14 @@
class MapDictIteratorItems(BaseItemIterator):
- def __init__(self, space, strategy, dictimplementation):
- BaseItemIterator.__init__(self, space, strategy, dictimplementation)
- w_obj = strategy.unerase(dictimplementation.dstorage)
+ def __init__(self, space, strategy, w_dict):
+ BaseItemIterator.__init__(self, space, strategy, w_dict)
+ w_obj = strategy.unerase(w_dict.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_item_entry(self):
- implementation = self.dictimplementation
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None, None
if self.curr_map:
From pypy.commits at gmail.com Mon May 2 13:31:14 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 10:31:14 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: in-progress
Message-ID: <57278ee2.45bd1c0a.7f058.6163@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84132:585ea21d4f7f
Date: 2016-05-02 19:31 +0200
http://bitbucket.org/pypy/pypy/changeset/585ea21d4f7f/
Log: in-progress
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1565,6 +1565,13 @@
self.header(shadow).tid |= GCFLAG_VISITED
new_shadow_object_dict.setitem(obj, shadow)
+ def register_finalizer(self, fq_index, gcobj):
+ from rpython.rtyper.lltypesystem import rffi
+ obj = llmemory.cast_ptr_to_adr(gcobj)
+ self.probably_young_objects_with_finalizers.append(obj)
+ fq_index = rffi.cast(llmemory.Address, fq_index)
+ self.probably_young_objects_with_finalizers.append(fq_index)
+
# ----------
# Nursery collection
@@ -2617,12 +2624,12 @@
def deal_with_young_objects_with_finalizers(self):
while self.probably_young_objects_with_finalizers.non_empty():
obj = self.probably_young_objects_with_finalizers.popleft()
- fin_nr = self.probably_young_objects_with_finalizers.popleft()
- singleaddr.address[0] = obj
- self._trace_drag_out1(singleaddr)
- obj = singleaddr.address[0]
- self.old_objects_with_light_finalizers.append(obj)
- self.old_objects_with_light_finalizers.append(fin_nr)
+ fq_nr = self.probably_young_objects_with_finalizers.popleft()
+ self.singleaddr.address[0] = obj
+ self._trace_drag_out1(self.singleaddr)
+ obj = self.singleaddr.address[0]
+ self.old_objects_with_finalizers.append(obj)
+ self.old_objects_with_finalizers.append(fq_nr)
def deal_with_objects_with_finalizers(self):
# Walk over list of objects with finalizers.
@@ -2635,14 +2642,17 @@
marked = self.AddressDeque()
pending = self.AddressStack()
self.tmpstack = self.AddressStack()
- while self.objects_with_finalizers.non_empty():
- x = self.objects_with_finalizers.popleft()
+ while self.old_objects_with_finalizers.non_empty():
+ x = self.old_objects_with_finalizers.popleft()
+ fq_nr = self.old_objects_with_finalizers.popleft()
ll_assert(self._finalization_state(x) != 1,
"bad finalization state 1")
if self.header(x).tid & GCFLAG_VISITED:
new_with_finalizer.append(x)
+ new_with_finalizer.append(fq_nr)
continue
marked.append(x)
+ marked.append(fq_nr)
pending.append(x)
while pending.non_empty():
y = pending.pop()
@@ -2662,9 +2672,11 @@
while marked.non_empty():
x = marked.popleft()
+ fq_nr = marked.popleft()
state = self._finalization_state(x)
ll_assert(state >= 2, "unexpected finalization state < 2")
if state == 2:
+ # XXX use fq_nr here
self.run_finalizers.append(x)
# we must also fix the state from 2 to 3 here, otherwise
# we leave the GCFLAG_FINALIZATION_ORDERING bit behind
@@ -2672,12 +2684,13 @@
self._recursively_bump_finalization_state_from_2_to_3(x)
else:
new_with_finalizer.append(x)
+ new_with_finalizer.append(fq_nr)
self.tmpstack.delete()
pending.delete()
marked.delete()
- self.objects_with_finalizers.delete()
- self.objects_with_finalizers = new_with_finalizer
+ self.old_objects_with_finalizers.delete()
+ self.old_objects_with_finalizers = new_with_finalizer
def _append_if_nonnull(pointer, stack):
stack.append(pointer.address[0])
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -4,6 +4,7 @@
from rpython.rtyper.annlowlevel import llhelper
from rpython.memory import gctypelayout
from rpython.flowspace.model import Constant
+from rpython.rlib import rgc
class GCManagedHeap(object):
@@ -20,6 +21,7 @@
self.llinterp = llinterp
self.prepare_graphs(flowgraphs)
self.gc.setup()
+ self.finalizer_queues = {}
self.has_write_barrier_from_array = hasattr(self.gc,
'write_barrier_from_array')
@@ -187,6 +189,20 @@
def thread_run(self):
pass
+ def get_finalizer_queue_index(self, fq_tag):
+ assert fq_tag.expr == 'FinalizerQueue TAG'
+ fq = fq_tag.default
+ return self.finalizer_queues.setdefault(fq, len(self.finalizer_queues))
+
+ def gc_fq_next_dead(self, fq_tag):
+ index = self.get_finalizer_queue_index(fq_tag)
+ xxx
+
+ def gc_fq_register(self, fq_tag, ptr):
+ index = self.get_finalizer_queue_index(fq_tag)
+ ptr = lltype.cast_opaque_ptr(llmemory.GCREF, ptr)
+ self.gc.register_finalizer(index, ptr)
+
# ____________________________________________________________
class LLInterpRootWalker:
diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py
--- a/rpython/memory/test/gc_test_base.py
+++ b/rpython/memory/test/gc_test_base.py
@@ -128,7 +128,7 @@
assert res == concat(100)
#assert simulator.current_size - curr < 16000 * INT_SIZE / 4
- def test_finalizer(self):
+ def test_destructor(self):
class B(object):
pass
b = B()
@@ -152,6 +152,36 @@
res = self.interpret(f, [5])
assert res == 6
+ def test_finalizer(self):
+ class B(object):
+ pass
+ b = B()
+ b.nextid = 0
+ b.num_deleted = 0
+ class A(object):
+ def __init__(self):
+ self.id = b.nextid
+ b.nextid += 1
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ b.num_deleted += 1
+ fq = FQ()
+ def f(x):
+ a = A()
+ i = 0
+ while i < x:
+ i += 1
+ a = A()
+ a = None
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ return b.num_deleted
+ res = self.interpret(f, [5])
+ assert res == 6
+
def test_finalizer_calls_malloc(self):
class B(object):
pass
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -5,6 +5,7 @@
from rpython.rlib import jit
from rpython.rlib.objectmodel import we_are_translated, enforceargs, specialize
+from rpython.rlib.objectmodel import CDefinedIntSymbolic
from rpython.rtyper.extregistry import ExtRegistryEntry
from rpython.rtyper.lltypesystem import lltype, llmemory
@@ -389,7 +390,7 @@
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.rclass import OBJECTPTR
from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
- ptr = llop.gc_fq_next_dead(OBJECTPTR, self)
+ ptr = llop.gc_fq_next_dead(OBJECTPTR, self._get_tag())
return cast_base_ptr_to_instance(self.Class, ptr)
try:
return self._queue.popleft()
@@ -404,9 +405,16 @@
from rpython.rtyper.rclass import OBJECTPTR
from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr
ptr = cast_instance_to_base_ptr(obj)
- llop.gc_fq_register(lltype.Void, self, ptr)
+ llop.gc_fq_register(lltype.Void, self._get_tag(), ptr)
return
+ else:
+ self._untranslated_register_finalizer(obj)
+ @specialize.memo()
+ def _get_tag(self):
+ return CDefinedIntSymbolic('FinalizerQueue TAG', default=self)
+
+ def _untranslated_register_finalizer(self, obj):
if hasattr(obj, '__enable_del_for_id'):
return # already called
diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py
--- a/rpython/rtyper/llinterp.py
+++ b/rpython/rtyper/llinterp.py
@@ -720,6 +720,12 @@
def op_gc_add_memory_pressure(self, size):
self.heap.add_memory_pressure(size)
+ def op_gc_fq_next_dead(self, fq_tag):
+ return self.heap.gc_fq_next_dead(fq_tag)
+
+ def op_gc_fq_register(self, fq_tag, obj):
+ self.heap.gc_fq_register(fq_tag, obj)
+
def op_gc_gettypeid(self, obj):
return lloperation.llop.combine_ushort(lltype.Signed, self.heap.gettypeid(obj), 0)
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -504,6 +504,8 @@
'gc_gettypeid' : LLOp(),
'gc_gcflag_extra' : LLOp(),
'gc_add_memory_pressure': LLOp(),
+ 'gc_fq_next_dead' : LLOp(),
+ 'gc_fq_register' : LLOp(),
'gc_rawrefcount_init': LLOp(),
'gc_rawrefcount_create_link_pypy': LLOp(),
From pypy.commits at gmail.com Mon May 2 13:42:54 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Mon, 02 May 2016 10:42:54 -0700 (PDT)
Subject: [pypy-commit] pypy gc-forkfriendly: Hack out GC flags to possibly
be behind a pointer in incminimark.
Message-ID: <5727919e.2457c20a.4ec44.612c@mx.google.com>
Author: Devin Jeanpierre
Branch: gc-forkfriendly
Changeset: r84133:d7d1d6464379
Date: 2016-05-02 10:41 -0700
http://bitbucket.org/pypy/pypy/changeset/d7d1d6464379/
Log: Hack out GC flags to possibly be behind a pointer in incminimark.
TODO: allocate flags out of the struct, measure cost/benefit (i.e.
benchmark)
Sorry about the sledgehammer refactoring. I'm not familiar with the
GC code yet.
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1039,7 +1039,7 @@
# object: a weakref, or one with any kind of finalizer.
return False
#
- self.header(obj).tid |= GCFLAG_PINNED
+ self.add_flags(obj, GCFLAG_PINNED)
self.pinned_objects_in_nursery += 1
return True
@@ -1048,11 +1048,11 @@
ll_assert(self._is_pinned(obj),
"unpin: object is already not pinned")
#
- self.header(obj).tid &= ~GCFLAG_PINNED
+ self.remove_flags(obj, GCFLAG_PINNED)
self.pinned_objects_in_nursery -= 1
def _is_pinned(self, obj):
- return (self.header(obj).tid & GCFLAG_PINNED) != 0
+ return (self.get_flags(obj) & GCFLAG_PINNED) != 0
def shrink_array(self, obj, smallerlength):
#
@@ -1065,7 +1065,7 @@
# the already-allocated shadow.
if not self.is_in_nursery(obj):
return False
- if self.header(obj).tid & GCFLAG_HAS_SHADOW:
+ if self.get_flags(obj) & GCFLAG_HAS_SHADOW:
return False
#
size_gc_header = self.gcheaderbuilder.size_gc_header
@@ -1083,7 +1083,7 @@
# Simple helpers
def get_type_id(self, obj):
- tid = self.header(obj).tid
+ tid = self.get_flags(obj)
return llop.extract_ushort(llgroup.HALFWORD, tid)
def combine(self, typeid16, flags):
@@ -1127,7 +1127,7 @@
that can never be set on a young object -- except if tid == -42.
"""
assert self.is_in_nursery(obj)
- tid = self.header(obj).tid
+ tid = self.get_flags(obj)
result = (tid & GCFLAG_FINALIZATION_ORDERING != 0)
if result:
ll_assert(tid == -42, "bogus header for young obj")
@@ -1208,9 +1208,9 @@
if not self._is_pinned(obj):
ll_assert(not self.is_in_nursery(obj),
"object in nursery after collection")
- ll_assert(self.header(obj).tid & GCFLAG_VISITED_RMY == 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_VISITED_RMY == 0,
"GCFLAG_VISITED_RMY after collection")
- ll_assert(self.header(obj).tid & GCFLAG_PINNED == 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_PINNED == 0,
"GCFLAG_PINNED outside the nursery after collection")
else:
ll_assert(self.is_in_nursery(obj),
@@ -1228,7 +1228,7 @@
ll_assert(False, "unknown gc_state value")
def _debug_check_object_marking(self, obj):
- if self.header(obj).tid & GCFLAG_VISITED != 0:
+ if self.get_flags(obj) & GCFLAG_VISITED != 0:
# A black object. Should NEVER point to a white object.
self.trace(obj, self._debug_check_not_white, None)
# During marking, all visited (black) objects should always have
@@ -1238,17 +1238,17 @@
# object state VISITED & ~WRITE_BARRIER.
typeid = self.get_type_id(obj)
if self.has_gcptr(typeid):
- ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_TRACK_YOUNG_PTRS != 0,
"black object without GCFLAG_TRACK_YOUNG_PTRS")
def _debug_check_not_white(self, root, ignored):
obj = root.address[0]
- if self.header(obj).tid & GCFLAG_VISITED != 0:
+ if self.get_flags(obj) & GCFLAG_VISITED != 0:
pass # black -> black
elif (self._debug_objects_to_trace_dict1.contains(obj) or
self._debug_objects_to_trace_dict2.contains(obj)):
pass # black -> gray
- elif self.header(obj).tid & GCFLAG_NO_HEAP_PTRS != 0:
+ elif self.get_flags(obj) & GCFLAG_NO_HEAP_PTRS != 0:
pass # black -> white-but-prebuilt-so-dont-care
elif self._is_pinned(obj):
# black -> pinned: the pinned object is a white one as
@@ -1267,23 +1267,23 @@
# don't have any GC pointer or are pinned objects
typeid = self.get_type_id(obj)
if self.has_gcptr(typeid) and not self._is_pinned(obj):
- ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_TRACK_YOUNG_PTRS != 0,
"missing GCFLAG_TRACK_YOUNG_PTRS")
# the GCFLAG_FINALIZATION_ORDERING should not be set between coll.
- ll_assert(self.header(obj).tid & GCFLAG_FINALIZATION_ORDERING == 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_FINALIZATION_ORDERING == 0,
"unexpected GCFLAG_FINALIZATION_ORDERING")
# the GCFLAG_CARDS_SET should not be set between collections
- ll_assert(self.header(obj).tid & GCFLAG_CARDS_SET == 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_CARDS_SET == 0,
"unexpected GCFLAG_CARDS_SET")
# if the GCFLAG_HAS_CARDS is set, check that all bits are zero now
- if self.header(obj).tid & GCFLAG_HAS_CARDS:
+ if self.get_flags(obj) & GCFLAG_HAS_CARDS:
if self.card_page_indices <= 0:
ll_assert(False, "GCFLAG_HAS_CARDS but not using card marking")
return
typeid = self.get_type_id(obj)
ll_assert(self.has_gcptr_in_varsize(typeid),
"GCFLAG_HAS_CARDS but not has_gcptr_in_varsize")
- ll_assert(self.header(obj).tid & GCFLAG_NO_HEAP_PTRS == 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_NO_HEAP_PTRS == 0,
"GCFLAG_HAS_CARDS && GCFLAG_NO_HEAP_PTRS")
offset_to_length = self.varsize_offset_to_length(typeid)
length = (obj + offset_to_length).signed[0]
@@ -1306,7 +1306,7 @@
# This check is called before scanning starts.
# Scanning is done in a single step.
# the GCFLAG_VISITED should not be set between collections
- ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_VISITED == 0,
"unexpected GCFLAG_VISITED")
# All other invariants from the sweeping phase should still be
@@ -1345,11 +1345,11 @@
return cls.minimal_size_in_nursery
def write_barrier(self, addr_struct):
- if self.header(addr_struct).tid & GCFLAG_TRACK_YOUNG_PTRS:
+ if self.get_flags(addr_struct) & GCFLAG_TRACK_YOUNG_PTRS:
self.remember_young_pointer(addr_struct)
def write_barrier_from_array(self, addr_array, index):
- if self.header(addr_array).tid & GCFLAG_TRACK_YOUNG_PTRS:
+ if self.get_flags(addr_array) & GCFLAG_TRACK_YOUNG_PTRS:
if self.card_page_indices > 0:
self.remember_young_pointer_from_array2(addr_array, index)
else:
@@ -1367,7 +1367,7 @@
#
if DEBUG: # note: PYPY_GC_DEBUG=1 does not enable this
ll_assert(self.debug_is_old_object(addr_struct) or
- self.header(addr_struct).tid & GCFLAG_HAS_CARDS != 0,
+ self.get_flags(addr_struct) & GCFLAG_HAS_CARDS != 0,
"young object with GCFLAG_TRACK_YOUNG_PTRS and no cards")
#
# We need to remove the flag GCFLAG_TRACK_YOUNG_PTRS and add
@@ -1563,7 +1563,7 @@
# visit shadow to keep it alive
# XXX seems like it is safe to set GCFLAG_VISITED, however
# should be double checked
- self.header(shadow).tid |= GCFLAG_VISITED
+ self.set_flags(shadow, GCFLAG_VISITED)
new_shadow_object_dict.setitem(obj, shadow)
# ----------
@@ -1711,7 +1711,7 @@
#
# clean up object's flags
obj = cur + size_gc_header
- self.header(obj).tid &= ~GCFLAG_VISITED
+ self.remove_flags(obj, GCFLAG_VISITED)
#
# create a new nursery barrier for the pinned object
nursery_barriers.append(cur)
@@ -1756,8 +1756,8 @@
debug_stop("gc-minor")
def _reset_flag_old_objects_pointing_to_pinned(self, obj, ignore):
- assert self.header(obj).tid & GCFLAG_PINNED_OBJECT_PARENT_KNOWN
- self.header(obj).tid &= ~GCFLAG_PINNED_OBJECT_PARENT_KNOWN
+ assert self.get_flags(obj) & GCFLAG_PINNED_OBJECT_PARENT_KNOWN
+ self.remove_flags(obj, GCFLAG_PINNED_OBJECT_PARENT_KNOWN)
def _visit_old_objects_pointing_to_pinned(self, obj, ignore):
self.trace(obj, self._trace_drag_out, obj)
@@ -1794,9 +1794,9 @@
obj = oldlist.pop()
#
# Remove the GCFLAG_CARDS_SET flag.
- ll_assert(self.header(obj).tid & GCFLAG_CARDS_SET != 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_CARDS_SET != 0,
"!GCFLAG_CARDS_SET but object in 'old_objects_with_cards_set'")
- self.header(obj).tid &= ~GCFLAG_CARDS_SET
+ self.remove_flags(obj, GCFLAG_CARDS_SET)
#
# Get the number of card marker bytes in the header.
typeid = self.get_type_id(obj)
@@ -1809,7 +1809,7 @@
# means that it is in 'old_objects_pointing_to_young' and
# will be fully traced by collect_oldrefs_to_nursery() just
# afterwards.
- if self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
+ if self.get_flags(obj) & GCFLAG_TRACK_YOUNG_PTRS == 0:
#
# In that case, we just have to reset all card bits.
while bytes > 0:
@@ -1849,7 +1849,7 @@
ll_assert(not self.is_in_nursery(obj),
"expected nursery obj in collect_cardrefs_to_nursery")
if self.gc_state == STATE_MARKING:
- self.header(obj).tid &= ~GCFLAG_VISITED
+ self.remove_flags(obj, GCFLAG_VISITED)
self.more_objects_to_trace.append(obj)
@@ -1862,13 +1862,13 @@
#
# Check that the flags are correct: we must not have
# GCFLAG_TRACK_YOUNG_PTRS so far.
- ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS == 0,
+ ll_assert(self.get_flags(obj) & GCFLAG_TRACK_YOUNG_PTRS == 0,
"old_objects_pointing_to_young contains obj with "
"GCFLAG_TRACK_YOUNG_PTRS")
#
# Add the flag GCFLAG_TRACK_YOUNG_PTRS. All live objects should
# have this flag set after a nursery collection.
- self.header(obj).tid |= GCFLAG_TRACK_YOUNG_PTRS
+ self.add_flags(obj, GCFLAG_TRACK_YOUNG_PTRS)
#
# Trace the 'obj' to replace pointers to nursery with pointers
# outside the nursery, possibly forcing nursery objects out
@@ -1908,7 +1908,7 @@
# Additionally, ignore pinned objects.
#
obj = root.address[0]
- if (self.header(obj).tid & (GCFLAG_VISITED | GCFLAG_PINNED)) == 0:
+ if (self.get_flags(obj) & (GCFLAG_VISITED | GCFLAG_PINNED)) == 0:
self.more_objects_to_trace.append(obj)
def _trace_drag_out(self, root, parent):
@@ -1929,7 +1929,7 @@
return
#
size_gc_header = self.gcheaderbuilder.size_gc_header
- if self.header(obj).tid & (GCFLAG_HAS_SHADOW | GCFLAG_PINNED) == 0:
+ if self.get_flags(obj) & (GCFLAG_HAS_SHADOW | GCFLAG_PINNED) == 0:
#
# Common case: 'obj' was not already forwarded (otherwise
# tid == -42, containing all flags), and it doesn't have the
@@ -1957,11 +1957,11 @@
# become dead and be removed just because the first parent of it
# is dead and collected.
if parent != llmemory.NULL and \
- not self.header(parent).tid & GCFLAG_PINNED_OBJECT_PARENT_KNOWN:
+ not self.get_flags(parent) & GCFLAG_PINNED_OBJECT_PARENT_KNOWN:
#
self.old_objects_pointing_to_pinned.append(parent)
self.updated_old_objects_pointing_to_pinned = True
- self.header(parent).tid |= GCFLAG_PINNED_OBJECT_PARENT_KNOWN
+ self.set_flags(parent, GCFLAG_PINNED_OBJECT_PARENT_KNOWN)
#
if hdr.tid & GCFLAG_VISITED:
return
@@ -1981,7 +1981,7 @@
#
# Remove the flag GCFLAG_HAS_SHADOW, so that it doesn't get
# copied to the shadow itself.
- self.header(obj).tid &= ~GCFLAG_HAS_SHADOW
+ self.remove_flags(obj, GCFLAG_HAS_SHADOW)
#
totalsize = size_gc_header + self.get_size(obj)
self.nursery_surviving_size += raw_malloc_usage(totalsize)
@@ -1989,19 +1989,10 @@
# Copy it. Note that references to other objects in the
# nursery are kept unchanged in this step.
llmemory.raw_memcopy(obj - size_gc_header, newhdr, totalsize)
- #
- # Set the old object's tid to -42 (containing all flags) and
- # replace the old object's content with the target address.
- # A bit of no-ops to convince llarena that we are changing
- # the layout, in non-translated versions.
typeid = self.get_type_id(obj)
- obj = llarena.getfakearenaaddress(obj)
- llarena.arena_reset(obj - size_gc_header, totalsize, 0)
- llarena.arena_reserve(obj - size_gc_header,
- size_gc_header + llmemory.sizeof(FORWARDSTUB))
- self.header(obj).tid = -42
newobj = newhdr + size_gc_header
- llmemory.cast_adr_to_ptr(obj, FORWARDSTUBPTR).forw = newobj
+ self.copy_header(obj, newobj)
+ self.make_forwardstub(obj, newobj)
#
# Change the original pointer to this object.
root.address[0] = newobj
@@ -2016,6 +2007,22 @@
_trace_drag_out._always_inline_ = True
+ def make_forwardstub(self, obj, forward_to):
+ """Turn obj into a forwarding stub to forward_to."""
+ # Set the old object's tid to -42 (containing all flags) and
+ # replace the old object's content with the target address.
+ # A bit of no-ops to convince llarena that we are changing
+ # the layout, in non-translated versions.
+ size_gc_header = self.gcheaderbuilder.size_gc_header
+ totalsize = size_gc_header + self.get_size(obj)
+ obj = llarena.getfakearenaaddress(obj)
+ llarena.arena_reset(obj - size_gc_header, totalsize, 0)
+ llarena.arena_reserve(obj - size_gc_header,
+ size_gc_header + llmemory.sizeof(FORWARDSTUB))
+ # self.set_flags(obj, -42) # Can't work.
+ self.header(obj).tid = -42
+ llmemory.cast_adr_to_ptr(obj, FORWARDSTUBPTR).forw = forward_to
+
def _visit_young_rawmalloced_object(self, obj):
# 'obj' points to a young, raw-malloced object.
# Any young rawmalloced object never seen by the code here
@@ -2102,7 +2109,7 @@
def _add_to_more_objects_to_trace(self, obj, ignored):
ll_assert(not self.is_in_nursery(obj), "unexpected nursery obj here")
- self.header(obj).tid &= ~GCFLAG_VISITED
+ self.remove_flags(obj, GCFLAG_VISITED)
self.more_objects_to_trace.append(obj)
def minor_and_major_collection(self):
@@ -2337,23 +2344,23 @@
debug_stop("gc-collect-step")
def _sweep_old_objects_pointing_to_pinned(self, obj, new_list):
- if self.header(obj).tid & GCFLAG_VISITED:
+ if self.get_flags(obj) & GCFLAG_VISITED:
new_list.append(obj)
def _free_if_unvisited(self, hdr):
size_gc_header = self.gcheaderbuilder.size_gc_header
obj = hdr + size_gc_header
- if self.header(obj).tid & GCFLAG_VISITED:
- self.header(obj).tid &= ~GCFLAG_VISITED
+ if self.get_flags(obj) & GCFLAG_VISITED:
+ self.remove_flags(obj, GCFLAG_VISITED)
return False # survives
return True # dies
def _reset_gcflag_visited(self, obj, ignored):
- self.header(obj).tid &= ~GCFLAG_VISITED
+ self.remove_flags(obj, GCFLAG_VISITED)
def free_rawmalloced_object_if_unvisited(self, obj, check_flag):
- if self.header(obj).tid & check_flag:
- self.header(obj).tid &= ~check_flag # survives
+ if self.get_flags(obj) & check_flag:
+ self.remove_flags(obj, check_flag) # survives
self.old_rawmalloced_objects.append(obj)
else:
size_gc_header = self.gcheaderbuilder.size_gc_header
@@ -2363,7 +2370,7 @@
#
# Must also include the card marker area, if any
if (self.card_page_indices > 0 # <- this is constant-folded
- and self.header(obj).tid & GCFLAG_HAS_CARDS):
+ and self.get_flags(obj) & GCFLAG_HAS_CARDS):
#
# Get the length and compute the number of extra bytes
typeid = self.get_type_id(obj)
@@ -2508,13 +2515,13 @@
# the next major collection, at which point we want
# it to look valid (but ready to be freed).
shadow = shadowhdr + size_gc_header
- self.header(shadow).tid = self.header(obj).tid
+ self.copy_header(obj, shadow)
typeid = self.get_type_id(obj)
if self.is_varsize(typeid):
lenofs = self.varsize_offset_to_length(typeid)
(shadow + lenofs).signed[0] = (obj + lenofs).signed[0]
#
- self.header(obj).tid |= GCFLAG_HAS_SHADOW
+ self.add_flags(obj, GCFLAG_HAS_SHADOW)
self.nursery_objects_shadows.setitem(obj, shadow)
return shadow
@@ -2524,7 +2531,7 @@
# nursery. Find or allocate a "shadow" object, which is
# where the object will be moved by the next minor
# collection
- if self.header(obj).tid & GCFLAG_HAS_SHADOW:
+ if self.get_flags(obj) & GCFLAG_HAS_SHADOW:
shadow = self.nursery_objects_shadows.get(obj)
ll_assert(shadow != llmemory.NULL,
"GCFLAG_HAS_SHADOW but no shadow found")
@@ -2546,7 +2553,7 @@
if self.is_in_nursery(obj):
obj = self._find_shadow(obj)
elif is_hash:
- if self.header(obj).tid & GCFLAG_HAS_SHADOW:
+ if self.get_flags(obj) & GCFLAG_HAS_SHADOW:
#
# For identityhash(), we need a special case for some
# prebuilt objects: their hash must be the same before
@@ -2598,7 +2605,7 @@
new_objects = self.AddressStack()
while self.old_objects_with_light_finalizers.non_empty():
obj = self.old_objects_with_light_finalizers.pop()
- if self.header(obj).tid & GCFLAG_VISITED:
+ if self.get_flags(obj) & GCFLAG_VISITED:
# surviving
new_objects.append(obj)
else:
@@ -2624,7 +2631,7 @@
x = self.objects_with_finalizers.popleft()
ll_assert(self._finalization_state(x) != 1,
"bad finalization state 1")
- if self.header(x).tid & GCFLAG_VISITED:
+ if self.get_flags(x) & GCFLAG_VISITED:
new_with_finalizer.append(x)
continue
marked.append(x)
@@ -2669,7 +2676,7 @@
_append_if_nonnull = staticmethod(_append_if_nonnull)
def _finalization_state(self, obj):
- tid = self.header(obj).tid
+ tid = self.get_flags(obj)
if tid & GCFLAG_VISITED:
if tid & GCFLAG_FINALIZATION_ORDERING:
return 2
@@ -2748,13 +2755,13 @@
elif (bool(self.young_rawmalloced_objects) and
self.young_rawmalloced_objects.contains(pointing_to)):
# young weakref to a young raw-malloced object
- if self.header(pointing_to).tid & GCFLAG_VISITED_RMY:
+ if self.get_flags(pointing_to) & GCFLAG_VISITED_RMY:
pass # survives, but does not move
else:
(obj + offset).address[0] = llmemory.NULL
continue # no need to remember this weakref any longer
#
- elif self.header(pointing_to).tid & GCFLAG_NO_HEAP_PTRS:
+ elif self.get_flags(pointing_to) & GCFLAG_NO_HEAP_PTRS:
# see test_weakref_to_prebuilt: it's not useful to put
# weakrefs into 'old_objects_with_weakrefs' if they point
# to a prebuilt object (they are immortal). If moreover
@@ -2774,14 +2781,14 @@
new_with_weakref = self.AddressStack()
while self.old_objects_with_weakrefs.non_empty():
obj = self.old_objects_with_weakrefs.pop()
- if self.header(obj).tid & GCFLAG_VISITED == 0:
+ if self.get_flags(obj) & GCFLAG_VISITED == 0:
continue # weakref itself dies
offset = self.weakpointer_offset(self.get_type_id(obj))
pointing_to = (obj + offset).address[0]
- ll_assert((self.header(pointing_to).tid & GCFLAG_NO_HEAP_PTRS)
+ ll_assert((self.get_flags(pointing_to) & GCFLAG_NO_HEAP_PTRS)
== 0, "registered old weakref should not "
"point to a NO_HEAP_PTRS obj")
- tid = self.header(pointing_to).tid
+ tid = self.get_flags(pointing_to)
if ((tid & (GCFLAG_VISITED | GCFLAG_FINALIZATION_ORDERING)) ==
GCFLAG_VISITED):
new_with_weakref.append(obj)
@@ -2933,7 +2940,7 @@
elif (bool(self.young_rawmalloced_objects) and
self.young_rawmalloced_objects.contains(obj)):
# young weakref to a young raw-malloced object
- if self.header(obj).tid & GCFLAG_VISITED_RMY:
+ if self.get_flags(obj) & GCFLAG_VISITED_RMY:
surviving = True # survives, but does not move
else:
surviving = False
@@ -3027,9 +3034,26 @@
# * GCFLAG_NO_HEAP_PTRS: immortal object never traced (so far)
intobj = self._pyobj(pyobject).ob_pypy_link
obj = llmemory.cast_int_to_adr(intobj)
- if self.header(obj).tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS):
+ if self.get_flags(obj) & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS):
surviving_list.append(pyobject)
if surviving_dict:
surviving_dict.insertclean(obj, pyobject)
else:
self._rrc_free(pyobject)
+
+ # Methods meant to be overridden by subclasses that store flags elsewhere.
+
+ def copy_header(self, src, dest):
+ self.header(dest).tid = self.header(src).tid
+
+ def get_flags(self, obj):
+ return self.header(obj).tid
+
+ def set_flags(self, obj, flags):
+ self.header(obj).tid=flags
+
+ def add_flags(self, obj, flags):
+ self.header(obj).tid|=flags
+
+ def remove_flags(self, obj, flags):
+ self.header(obj).tid&=~flags
diff --git a/rpython/memory/gc/incminimark_remoteheader.py b/rpython/memory/gc/incminimark_remoteheader.py
new file mode 100644
--- /dev/null
+++ b/rpython/memory/gc/incminimark_remoteheader.py
@@ -0,0 +1,45 @@
+"""Incminimark with GC flags stored in a separate page for fork-friendliness."""
+
+from rpython.memory.gc import incminimark
+from rpython.rtyper.lltypesystem import lltype, llmemory
+
+class IncrementalMiniMarkRemoteHeaderGC(incminimark.IncrementalMiniMarkGC):
+ # The GC header is similar to incminimark, except that the flags can be
+ # placed anywhere, not just in the bits of tid.
+ # TODO: Actually place flags somewhere other than tid.
+ HDR = lltype.Struct('header',
+ ('tid', lltype.Signed),
+ ('remote_flags', lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))))
+
+ def init_gc_object(self, addr, typeid16, flags=0):
+ super(IncrementalMiniMarkRemoteHeaderGC, self).init_gc_object(addr, typeid16, flags)
+ hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
+ hdr.remote_flags = lltype.direct_fieldptr(hdr, 'tid')
+
+ def make_forwardstub(self, obj, forward_to):
+ assert (self.header(obj).remote_flags
+ == lltype.direct_fieldptr(self.header(obj), 'tid')), \
+ "Nursery objects should not have separately-allocated flags."
+ super(IncrementalMiniMarkRemoteHeaderGC, self).make_forwardstub(obj, forward_to)
+ hdr = self.header(obj)
+ hdr.remote_flags = lltype.direct_fieldptr(hdr, 'tid')
+
+ def copy_header(self, src, dest):
+ dest_hdr = self.header(dest)
+ dest_hdr.tid = self.get_flags(src)
+ dest_hdr.remote_flags = lltype.direct_fieldptr(dest_hdr, 'tid')
+ # TODO: make new remote flag sometimes.
+
+ # Manipulate flags through a pointer.
+
+ def get_flags(self, obj):
+ return self.header(obj).remote_flags[0]
+
+ def set_flags(self, obj, flags):
+ self.header(obj).remote_flags[0] = flags
+
+ def add_flags(self, obj, flags):
+ self.header(obj).remote_flags[0] |= flags
+
+ def remove_flags(self, obj, flags):
+ self.header(obj).remote_flags[0] &= ~flags
diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py
--- a/rpython/memory/gc/test/test_direct.py
+++ b/rpython/memory/gc/test/test_direct.py
@@ -668,6 +668,9 @@
self.gc.debug_gc_step_until(incminimark.STATE_SCANNING)
assert self.stackroots[1].x == 13
+class TestIncrementalMiniMarkRemoteHeaderGCSimple(TestIncrementalMiniMarkGCSimple):
+ from rpython.memory.gc.incminimark_remoteheader import IncrementalMiniMarkRemoteHeaderGC as GCClass
+
class TestIncrementalMiniMarkGCFull(DirectGCTest):
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass
def test_malloc_fixedsize_no_cleanup(self):
@@ -744,4 +747,5 @@
assert elem.prev == lltype.nullptr(S)
assert elem.next == lltype.nullptr(S)
-
+class TestIncrementalMiniMarkRemoteHeaderGCFull(TestIncrementalMiniMarkGCFull):
+ from rpython.memory.gc.incminimark_remoteheader import IncrementalMiniMarkRemoteHeaderGC as GCClass
From pypy.commits at gmail.com Mon May 2 13:48:06 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 10:48:06 -0700 (PDT)
Subject: [pypy-commit] pypy default: ooops! Since 8fb078df2c3d,
most of the tests in this file don't run.
Message-ID: <572792d6.82bb1c0a.4e99d.6861@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84134:136845ff4b16
Date: 2016-05-02 19:46 +0200
http://bitbucket.org/pypy/pypy/changeset/136845ff4b16/
Log: ooops! Since 8fb078df2c3d, most of the tests in this file don't run.
That's because they are local functions of another test function...
diff --git a/rpython/translator/backendopt/test/test_finalizer.py b/rpython/translator/backendopt/test/test_finalizer.py
--- a/rpython/translator/backendopt/test/test_finalizer.py
+++ b/rpython/translator/backendopt/test/test_finalizer.py
@@ -35,31 +35,6 @@
r = self.analyze(f, [])
assert not r
-def test_various_ops():
- from rpython.flowspace.model import SpaceOperation, Constant
-
- X = lltype.Ptr(lltype.GcStruct('X'))
- Z = lltype.Ptr(lltype.Struct('Z'))
- S = lltype.GcStruct('S', ('x', lltype.Signed),
- ('y', X),
- ('z', Z))
- v1 = varoftype(lltype.Bool)
- v2 = varoftype(lltype.Signed)
- f = FinalizerAnalyzer(None)
- r = f.analyze(SpaceOperation('cast_int_to_bool', [v2],
- v1))
- assert not r
- v1 = varoftype(lltype.Ptr(S))
- v2 = varoftype(lltype.Signed)
- v3 = varoftype(X)
- v4 = varoftype(Z)
- assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('x'),
- v2], None))
- assert f.analyze(SpaceOperation('bare_setfield', [v1, Constant('y'),
- v3], None))
- assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('z'),
- v4], None))
-
def test_malloc(self):
S = lltype.GcStruct('S')
@@ -131,3 +106,30 @@
pass
self.analyze(g, []) # did not explode
py.test.raises(FinalizerError, self.analyze, f, [])
+
+
+def test_various_ops():
+ from rpython.flowspace.model import SpaceOperation, Constant
+
+ X = lltype.Ptr(lltype.GcStruct('X'))
+ Z = lltype.Ptr(lltype.Struct('Z'))
+ S = lltype.GcStruct('S', ('x', lltype.Signed),
+ ('y', X),
+ ('z', Z))
+ v1 = varoftype(lltype.Bool)
+ v2 = varoftype(lltype.Signed)
+ f = FinalizerAnalyzer(None)
+ r = f.analyze(SpaceOperation('cast_int_to_bool', [v2],
+ v1))
+ assert not r
+ v1 = varoftype(lltype.Ptr(S))
+ v2 = varoftype(lltype.Signed)
+ v3 = varoftype(X)
+ v4 = varoftype(Z)
+ assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('x'),
+ v2], None))
+ assert f.analyze(SpaceOperation('bare_setfield', [v1, Constant('y'),
+ v3], None))
+ assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('z'),
+ v4], None))
+
From pypy.commits at gmail.com Mon May 2 13:48:07 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 10:48:07 -0700 (PDT)
Subject: [pypy-commit] pypy default: Of course one of the tests fails
nowadays, fixed (in two versions).
Message-ID: <572792d7.6a70c20a.ed3cf.47e5@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84135:ad7b391873db
Date: 2016-05-02 19:48 +0200
http://bitbucket.org/pypy/pypy/changeset/ad7b391873db/
Log: Of course one of the tests fails nowadays, fixed (in two versions).
diff --git a/rpython/translator/backendopt/test/test_finalizer.py b/rpython/translator/backendopt/test/test_finalizer.py
--- a/rpython/translator/backendopt/test/test_finalizer.py
+++ b/rpython/translator/backendopt/test/test_finalizer.py
@@ -75,6 +75,22 @@
lltype.free(p, flavor='raw')
r = self.analyze(g, [], f, backendopt=True)
+ assert r
+
+ def test_c_call_without_release_gil(self):
+ C = rffi.CArray(lltype.Signed)
+ c = rffi.llexternal('x', [lltype.Ptr(C)], lltype.Signed,
+ releasegil=False)
+
+ def g():
+ p = lltype.malloc(C, 3, flavor='raw')
+ f(p)
+
+ def f(p):
+ c(rffi.ptradd(p, 0))
+ lltype.free(p, flavor='raw')
+
+ r = self.analyze(g, [], f, backendopt=True)
assert not r
def test_chain(self):
From pypy.commits at gmail.com Mon May 2 13:48:09 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 10:48:09 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge heads
Message-ID: <572792d9.c61ec20a.6a80e.7630@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84136:b76bb5e1d3cf
Date: 2016-05-02 19:48 +0200
http://bitbucket.org/pypy/pypy/changeset/b76bb5e1d3cf/
Log: merge heads
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -703,10 +703,10 @@
EMPTY = None, None
def next(self):
- if self.dictimplementation is None:
+ if self.w_dict is None:
return EMPTY
space = self.space
- if self.len != self.dictimplementation.length():
+ if self.len != self.w_dict.length():
self.len = -1 # Make this error state sticky
raise oefmt(space.w_RuntimeError,
"dictionary changed size during iteration")
@@ -715,7 +715,7 @@
if self.pos < self.len:
result = getattr(self, 'next_' + TP + '_entry')()
self.pos += 1
- if self.strategy is self.dictimplementation.get_strategy():
+ if self.strategy is self.w_dict.get_strategy():
return result # common case
else:
# waaa, obscure case: the strategy changed, but not the
@@ -725,28 +725,28 @@
if TP == 'key' or TP == 'value':
return result
w_key = result[0]
- w_value = self.dictimplementation.getitem(w_key)
+ w_value = self.w_dict.getitem(w_key)
if w_value is None:
self.len = -1 # Make this error state sticky
raise oefmt(space.w_RuntimeError,
"dictionary changed during iteration")
return (w_key, w_value)
# no more entries
- self.dictimplementation = None
+ self.w_dict = None
return EMPTY
return func_with_new_name(next, 'next_' + TP)
class BaseIteratorImplementation(object):
- def __init__(self, space, strategy, implementation):
+ def __init__(self, space, strategy, w_dict):
self.space = space
self.strategy = strategy
- self.dictimplementation = implementation
- self.len = implementation.length()
+ self.w_dict = w_dict
+ self.len = w_dict.length()
self.pos = 0
def length(self):
- if self.dictimplementation is not None and self.len != -1:
+ if self.w_dict is not None and self.len != -1:
return self.len - self.pos
return 0
@@ -781,9 +781,9 @@
'setitem_untyped_%s' % dictimpl.__name__)
class IterClassKeys(BaseKeyIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiterkeys(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiterkeys(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_key_entry(self):
for key in self.iterator:
@@ -792,9 +792,9 @@
return None
class IterClassValues(BaseValueIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getitervalues(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getitervalues(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_value_entry(self):
for value in self.iterator:
@@ -803,9 +803,9 @@
return None
class IterClassItems(BaseItemIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiteritems_with_hash(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiteritems_with_hash(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_item_entry(self):
for key, value, keyhash in self.iterator:
@@ -815,9 +815,9 @@
return None, None
class IterClassReversed(BaseKeyIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiterreversed(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiterreversed(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_key_entry(self):
for key in self.iterator:
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -833,15 +833,14 @@
obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
class MapDictIteratorKeys(BaseKeyIterator):
- def __init__(self, space, strategy, dictimplementation):
- BaseKeyIterator.__init__(self, space, strategy, dictimplementation)
- w_obj = strategy.unerase(dictimplementation.dstorage)
+ def __init__(self, space, strategy, w_dict):
+ BaseKeyIterator.__init__(self, space, strategy, w_dict)
+ w_obj = strategy.unerase(w_dict.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_key_entry(self):
- implementation = self.dictimplementation
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
@@ -855,15 +854,14 @@
class MapDictIteratorValues(BaseValueIterator):
- def __init__(self, space, strategy, dictimplementation):
- BaseValueIterator.__init__(self, space, strategy, dictimplementation)
- w_obj = strategy.unerase(dictimplementation.dstorage)
+ def __init__(self, space, strategy, w_dict):
+ BaseValueIterator.__init__(self, space, strategy, w_dict)
+ w_obj = strategy.unerase(w_dict.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_value_entry(self):
- implementation = self.dictimplementation
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
@@ -876,15 +874,14 @@
class MapDictIteratorItems(BaseItemIterator):
- def __init__(self, space, strategy, dictimplementation):
- BaseItemIterator.__init__(self, space, strategy, dictimplementation)
- w_obj = strategy.unerase(dictimplementation.dstorage)
+ def __init__(self, space, strategy, w_dict):
+ BaseItemIterator.__init__(self, space, strategy, w_dict)
+ w_obj = strategy.unerase(w_dict.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_item_entry(self):
- implementation = self.dictimplementation
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None, None
if self.curr_map:
From pypy.commits at gmail.com Mon May 2 13:49:21 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 10:49:21 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: in-progress
Message-ID: <57279321.442cc20a.ce956.4e28@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84137:1d403288f1ac
Date: 2016-05-02 19:48 +0200
http://bitbucket.org/pypy/pypy/changeset/1d403288f1ac/
Log: in-progress
diff --git a/rpython/translator/backendopt/finalizer.py b/rpython/translator/backendopt/finalizer.py
--- a/rpython/translator/backendopt/finalizer.py
+++ b/rpython/translator/backendopt/finalizer.py
@@ -5,25 +5,30 @@
class FinalizerError(Exception):
"""__del__() is used for lightweight RPython destructors,
but the FinalizerAnalyzer found that it is not lightweight.
+
+ The set of allowed operations is restrictive for a good reason
+ - it's better to be safe. Specifically disallowed operations:
+
+ * anything that escapes self
+ * anything that can allocate
"""
class FinalizerAnalyzer(graphanalyze.BoolGraphAnalyzer):
""" Analyzer that determines whether a finalizer is lightweight enough
so it can be called without all the complicated logic in the garbage
- collector. The set of operations here is restrictive for a good reason
- - it's better to be safe. Specifically disallowed operations:
-
- * anything that escapes self
- * anything that can allocate
+ collector.
"""
ok_operations = ['ptr_nonzero', 'ptr_eq', 'ptr_ne', 'free', 'same_as',
'direct_ptradd', 'force_cast', 'track_alloc_stop',
'raw_free', 'adr_eq', 'adr_ne']
def check_light_finalizer(self, graph):
+ self._origin = graph
result = self.analyze_direct_call(graph)
+ del self._origin
if result is self.top_result():
- raise FinalizerError(FinalizerError.__doc__, graph)
+ msg = '%s\nIn %r' % (FinalizerError.__doc__, graph)
+ raise FinalizerError(msg)
def analyze_simple_operation(self, op, graphinfo):
if op.opname in self.ok_operations:
@@ -41,4 +46,10 @@
if not isinstance(TP, lltype.Ptr) or TP.TO._gckind == 'raw':
# primitive type
return self.bottom_result()
- return self.top_result()
+
+ if not hasattr(self, '_origin'): # for tests
+ return self.top_result()
+ msg = '%s\nFound this forbidden operation:\n%r\nin %r\nfrom %r' % (
+ FinalizerError.__doc__, op, graphinfo,
+ getattr(self, '_origin', '?'))
+ raise FinalizerError(msg)
diff --git a/rpython/translator/backendopt/test/test_finalizer.py b/rpython/translator/backendopt/test/test_finalizer.py
--- a/rpython/translator/backendopt/test/test_finalizer.py
+++ b/rpython/translator/backendopt/test/test_finalizer.py
@@ -26,8 +26,12 @@
t.view()
a = FinalizerAnalyzer(t)
fgraph = graphof(t, func_to_analyze)
- result = a.analyze_light_finalizer(fgraph)
- return result
+ try:
+ a.check_light_finalizer(fgraph)
+ except FinalizerError as e:
+ print e
+ return a.top_result() # True
+ return a.bottom_result() # False
def test_nothing(self):
def f():
From pypy.commits at gmail.com Mon May 2 13:49:22 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 02 May 2016 10:49:22 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: hg merge default
Message-ID: <57279322.cf8ec20a.1afa0.4d6d@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84138:5c82986cf9e9
Date: 2016-05-02 19:49 +0200
http://bitbucket.org/pypy/pypy/changeset/5c82986cf9e9/
Log: hg merge default
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -54,3 +54,10 @@
generated subclasses.
.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL. Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -78,7 +78,11 @@
"""
try:
# run it
- f(*fargs, **fkwds)
+ try:
+ f(*fargs, **fkwds)
+ finally:
+ sys.settrace(None)
+ sys.setprofile(None)
# we arrive here if no exception is raised. stdout cosmetics...
try:
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -436,7 +436,7 @@
s = capi.c_resolve_name(self.space, s)
if s != self.templ_args[i]:
raise OperationError(self.space.w_TypeError, self.space.wrap(
- "non-matching template (got %s where %s expected" % (s, self.templ_args[i])))
+ "non-matching template (got %s where %s expected)" % (s, self.templ_args[i])))
return W_CPPBoundMethod(cppthis, self)
def bound_call(self, cppthis, args_w):
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -790,6 +790,8 @@
from rpython.rlib import rgil
argtypes_enum_ui = unrolling_iterable(enumerate(argtypesw))
fatal_value = restype._defl()
+ gil_auto_workaround = (gil is None) # automatically detect when we don't
+ # have the GIL, and acquire/release it
gil_acquire = (gil == "acquire" or gil == "around")
gil_release = (gil == "release" or gil == "around")
pygilstate_ensure = (gil == "pygilstate_ensure")
@@ -825,7 +827,8 @@
# see "Handling of the GIL" above (careful, we don't have the GIL here)
tid = rthread.get_or_make_ident()
- if gil_acquire:
+ _gil_auto = (gil_auto_workaround and cpyext_glob_tid_ptr[0] != tid)
+ if gil_acquire or _gil_auto:
if cpyext_glob_tid_ptr[0] == tid:
deadlock_error(nameof(callable))
rgil.acquire()
@@ -919,7 +922,7 @@
arg = rffi.cast(lltype.Signed, args[-1])
unlock = (arg == pystate.PyGILState_UNLOCKED)
else:
- unlock = gil_release
+ unlock = gil_release or _gil_auto
if unlock:
rgil.release()
else:
diff --git a/pypy/module/unicodedata/interp_ucd.py b/pypy/module/unicodedata/interp_ucd.py
--- a/pypy/module/unicodedata/interp_ucd.py
+++ b/pypy/module/unicodedata/interp_ucd.py
@@ -4,7 +4,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
from rpython.rlib.rarithmetic import r_longlong
from rpython.rlib.objectmodel import we_are_translated
@@ -34,8 +34,9 @@
# Target is wide build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode == 0xFFFF:
# Host CPython is narrow build, accept surrogates
@@ -54,8 +55,9 @@
# Target is narrow build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode > 0xFFFF:
# Host CPython is wide build, forbid surrogates
@@ -179,7 +181,9 @@
@unwrap_spec(form=str)
def normalize(self, space, form, w_unistr):
if not space.isinstance_w(w_unistr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap('argument 2 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 2 must be unicode, not %T',
+ w_unistr)
if form == 'NFC':
composed = True
decomposition = self._canon_decomposition
diff --git a/pypy/module/unicodedata/test/test_unicodedata.py b/pypy/module/unicodedata/test/test_unicodedata.py
--- a/pypy/module/unicodedata/test/test_unicodedata.py
+++ b/pypy/module/unicodedata/test/test_unicodedata.py
@@ -78,10 +78,15 @@
import unicodedata
assert unicodedata.lookup("GOTHIC LETTER FAIHU") == u'\U00010346'
- def test_normalize(self):
+ def test_normalize_bad_argcount(self):
import unicodedata
raises(TypeError, unicodedata.normalize, 'x')
+ def test_normalize_nonunicode(self):
+ import unicodedata
+ exc_info = raises(TypeError, unicodedata.normalize, 'NFC', 'x')
+ assert str(exc_info.value).endswith('must be unicode, not str')
+
@py.test.mark.skipif("sys.maxunicode < 0x10ffff")
def test_normalize_wide(self):
import unicodedata
@@ -103,6 +108,12 @@
# For no reason, unicodedata.mirrored() returns an int, not a bool
assert repr(unicodedata.mirrored(u' ')) == '0'
- def test_bidirectional(self):
+ def test_bidirectional_not_one_character(self):
import unicodedata
- raises(TypeError, unicodedata.bidirectional, u'xx')
+ exc_info = raises(TypeError, unicodedata.bidirectional, u'xx')
+ assert str(exc_info.value) == 'need a single Unicode character as parameter'
+
+ def test_bidirectional_not_one_character(self):
+ import unicodedata
+ exc_info = raises(TypeError, unicodedata.bidirectional, 'x')
+ assert str(exc_info.value).endswith('must be unicode, not str')
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -703,10 +703,10 @@
EMPTY = None, None
def next(self):
- if self.dictimplementation is None:
+ if self.w_dict is None:
return EMPTY
space = self.space
- if self.len != self.dictimplementation.length():
+ if self.len != self.w_dict.length():
self.len = -1 # Make this error state sticky
raise oefmt(space.w_RuntimeError,
"dictionary changed size during iteration")
@@ -715,7 +715,7 @@
if self.pos < self.len:
result = getattr(self, 'next_' + TP + '_entry')()
self.pos += 1
- if self.strategy is self.dictimplementation.get_strategy():
+ if self.strategy is self.w_dict.get_strategy():
return result # common case
else:
# waaa, obscure case: the strategy changed, but not the
@@ -725,28 +725,28 @@
if TP == 'key' or TP == 'value':
return result
w_key = result[0]
- w_value = self.dictimplementation.getitem(w_key)
+ w_value = self.w_dict.getitem(w_key)
if w_value is None:
self.len = -1 # Make this error state sticky
raise oefmt(space.w_RuntimeError,
"dictionary changed during iteration")
return (w_key, w_value)
# no more entries
- self.dictimplementation = None
+ self.w_dict = None
return EMPTY
return func_with_new_name(next, 'next_' + TP)
class BaseIteratorImplementation(object):
- def __init__(self, space, strategy, implementation):
+ def __init__(self, space, strategy, w_dict):
self.space = space
self.strategy = strategy
- self.dictimplementation = implementation
- self.len = implementation.length()
+ self.w_dict = w_dict
+ self.len = w_dict.length()
self.pos = 0
def length(self):
- if self.dictimplementation is not None and self.len != -1:
+ if self.w_dict is not None and self.len != -1:
return self.len - self.pos
return 0
@@ -781,9 +781,9 @@
'setitem_untyped_%s' % dictimpl.__name__)
class IterClassKeys(BaseKeyIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiterkeys(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiterkeys(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_key_entry(self):
for key in self.iterator:
@@ -792,9 +792,9 @@
return None
class IterClassValues(BaseValueIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getitervalues(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getitervalues(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_value_entry(self):
for value in self.iterator:
@@ -803,9 +803,9 @@
return None
class IterClassItems(BaseItemIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiteritems_with_hash(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiteritems_with_hash(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_item_entry(self):
for key, value, keyhash in self.iterator:
@@ -815,9 +815,9 @@
return None, None
class IterClassReversed(BaseKeyIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiterreversed(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiterreversed(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_key_entry(self):
for key in self.iterator:
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -833,15 +833,14 @@
obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
class MapDictIteratorKeys(BaseKeyIterator):
- def __init__(self, space, strategy, dictimplementation):
- BaseKeyIterator.__init__(self, space, strategy, dictimplementation)
- w_obj = strategy.unerase(dictimplementation.dstorage)
+ def __init__(self, space, strategy, w_dict):
+ BaseKeyIterator.__init__(self, space, strategy, w_dict)
+ w_obj = strategy.unerase(w_dict.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_key_entry(self):
- implementation = self.dictimplementation
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
@@ -855,15 +854,14 @@
class MapDictIteratorValues(BaseValueIterator):
- def __init__(self, space, strategy, dictimplementation):
- BaseValueIterator.__init__(self, space, strategy, dictimplementation)
- w_obj = strategy.unerase(dictimplementation.dstorage)
+ def __init__(self, space, strategy, w_dict):
+ BaseValueIterator.__init__(self, space, strategy, w_dict)
+ w_obj = strategy.unerase(w_dict.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_value_entry(self):
- implementation = self.dictimplementation
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
@@ -876,15 +874,14 @@
class MapDictIteratorItems(BaseItemIterator):
- def __init__(self, space, strategy, dictimplementation):
- BaseItemIterator.__init__(self, space, strategy, dictimplementation)
- w_obj = strategy.unerase(dictimplementation.dstorage)
+ def __init__(self, space, strategy, w_dict):
+ BaseItemIterator.__init__(self, space, strategy, w_dict)
+ w_obj = strategy.unerase(w_dict.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_item_entry(self):
- implementation = self.dictimplementation
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None, None
if self.curr_map:
diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py
--- a/pypy/objspace/std/newformat.py
+++ b/pypy/objspace/std/newformat.py
@@ -560,7 +560,7 @@
msg = "Sign not allowed in string format specifier"
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._alternate:
- msg = "Alternate form not allowed in string format specifier"
+ msg = "Alternate form (#) not allowed in string format specifier"
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._align == "=":
msg = "'=' alignment not allowed in string format specifier"
@@ -920,7 +920,7 @@
flags = 0
default_precision = 6
if self._alternate:
- msg = "alternate form not allowed in float formats"
+ msg = "Alternate form (#) not allowed in float formats"
raise OperationError(space.w_ValueError, space.wrap(msg))
tp = self._type
self._get_locale(tp)
@@ -998,9 +998,9 @@
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._alternate:
#alternate is invalid
- msg = "Alternate form %s not allowed in complex format specifier"
+ msg = "Alternate form (#) not allowed in complex format specifier"
raise OperationError(space.w_ValueError,
- space.wrap(msg % (self._alternate)))
+ space.wrap(msg))
skip_re = 0
add_parens = 0
if tp == "\0":
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -143,7 +143,6 @@
e = E()
D.__bases__ = (C,)
D.__bases__ = (C2,)
- #import pdb; pdb.set_trace()
assert d.meth() == 1
assert e.meth() == 1
assert d.a == 2
@@ -184,7 +183,7 @@
try:
D.__bases__ = ()
- except TypeError, msg:
+ except TypeError as msg:
if str(msg) == "a new-style class can't have only classic bases":
assert 0, "wrong error message for .__bases__ = ()"
else:
@@ -309,7 +308,7 @@
except TypeError:
pass
else:
- raise TestFailed, "didn't catch MRO conflict"
+ raise TestFailed("didn't catch MRO conflict")
def test_mutable_bases_versus_nonheap_types(self):
class A(int):
@@ -442,7 +441,7 @@
except TypeError:
pass
else:
- raise AssertionError, "this multiple inheritance should fail"
+ raise AssertionError("this multiple inheritance should fail")
def test_outer_metaclass(self):
class OuterMetaClass(type):
@@ -512,7 +511,7 @@
try:
assert NoDoc.__doc__ == None
except AttributeError:
- raise AssertionError, "__doc__ missing!"
+ raise AssertionError("__doc__ missing!")
def test_explicitdoc(self):
class ExplicitDoc(object):
@@ -539,7 +538,7 @@
# we always raise AttributeError.
pass
else:
- raise AssertionError, '__doc__ should not be writable'
+ raise AssertionError('__doc__ should not be writable')
assert ImmutableDoc.__doc__ == 'foo'
@@ -1048,14 +1047,14 @@
try:
class E(B, A): # "best base" is B
__slots__ = ("__dict__",)
- except TypeError, e:
+ except TypeError as e:
assert 'we already got one' in str(e)
else:
raise AssertionError("TypeError not raised")
try:
class F(B, A): # "best base" is B
__slots__ = ("__weakref__",)
- except TypeError, e:
+ except TypeError as e:
assert 'we already got one' in str(e)
else:
raise AssertionError("TypeError not raised")
diff --git a/rpython/translator/backendopt/test/test_finalizer.py b/rpython/translator/backendopt/test/test_finalizer.py
--- a/rpython/translator/backendopt/test/test_finalizer.py
+++ b/rpython/translator/backendopt/test/test_finalizer.py
@@ -39,31 +39,6 @@
r = self.analyze(f, [])
assert not r
-def test_various_ops():
- from rpython.flowspace.model import SpaceOperation, Constant
-
- X = lltype.Ptr(lltype.GcStruct('X'))
- Z = lltype.Ptr(lltype.Struct('Z'))
- S = lltype.GcStruct('S', ('x', lltype.Signed),
- ('y', X),
- ('z', Z))
- v1 = varoftype(lltype.Bool)
- v2 = varoftype(lltype.Signed)
- f = FinalizerAnalyzer(None)
- r = f.analyze(SpaceOperation('cast_int_to_bool', [v2],
- v1))
- assert not r
- v1 = varoftype(lltype.Ptr(S))
- v2 = varoftype(lltype.Signed)
- v3 = varoftype(X)
- v4 = varoftype(Z)
- assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('x'),
- v2], None))
- assert f.analyze(SpaceOperation('bare_setfield', [v1, Constant('y'),
- v3], None))
- assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('z'),
- v4], None))
-
def test_malloc(self):
S = lltype.GcStruct('S')
@@ -104,6 +79,22 @@
lltype.free(p, flavor='raw')
r = self.analyze(g, [], f, backendopt=True)
+ assert r
+
+ def test_c_call_without_release_gil(self):
+ C = rffi.CArray(lltype.Signed)
+ c = rffi.llexternal('x', [lltype.Ptr(C)], lltype.Signed,
+ releasegil=False)
+
+ def g():
+ p = lltype.malloc(C, 3, flavor='raw')
+ f(p)
+
+ def f(p):
+ c(rffi.ptradd(p, 0))
+ lltype.free(p, flavor='raw')
+
+ r = self.analyze(g, [], f, backendopt=True)
assert not r
def test_chain(self):
@@ -135,3 +126,30 @@
pass
self.analyze(g, []) # did not explode
py.test.raises(FinalizerError, self.analyze, f, [])
+
+
+def test_various_ops():
+ from rpython.flowspace.model import SpaceOperation, Constant
+
+ X = lltype.Ptr(lltype.GcStruct('X'))
+ Z = lltype.Ptr(lltype.Struct('Z'))
+ S = lltype.GcStruct('S', ('x', lltype.Signed),
+ ('y', X),
+ ('z', Z))
+ v1 = varoftype(lltype.Bool)
+ v2 = varoftype(lltype.Signed)
+ f = FinalizerAnalyzer(None)
+ r = f.analyze(SpaceOperation('cast_int_to_bool', [v2],
+ v1))
+ assert not r
+ v1 = varoftype(lltype.Ptr(S))
+ v2 = varoftype(lltype.Signed)
+ v3 = varoftype(X)
+ v4 = varoftype(Z)
+ assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('x'),
+ v2], None))
+ assert f.analyze(SpaceOperation('bare_setfield', [v1, Constant('y'),
+ v3], None))
+ assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('z'),
+ v4], None))
+
From pypy.commits at gmail.com Mon May 2 14:20:39 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 11:20:39 -0700 (PDT)
Subject: [pypy-commit] pypy default: Don't use deprecated raise statement
syntax
Message-ID: <57279a77.22c8c20a.e6a6c.5b04@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84139:d1f09c46b8e7
Date: 2016-05-02 19:19 +0100
http://bitbucket.org/pypy/pypy/changeset/d1f09c46b8e7/
Log: Don't use deprecated raise statement syntax
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
help="output format")
options, args = parser.parse_args()
if len(args) != 1:
- raise ValueError, "need exactly one argument"
+ raise ValueError("need exactly one argument")
epsfile = process_dot(py.path.local(args[0]))
if options.format == "ps" or options.format == "eps":
print epsfile.read()
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -134,11 +134,11 @@
"""The simplest argument parsing: get the 'argcount' arguments,
or raise a real ValueError if the length is wrong."""
if self.keywords:
- raise ValueError, "no keyword arguments expected"
+ raise ValueError("no keyword arguments expected")
if len(self.arguments_w) > argcount:
- raise ValueError, "too many arguments (%d expected)" % argcount
+ raise ValueError("too many arguments (%d expected)" % argcount)
elif len(self.arguments_w) < argcount:
- raise ValueError, "not enough arguments (%d expected)" % argcount
+ raise ValueError("not enough arguments (%d expected)" % argcount)
return self.arguments_w
def firstarg(self):
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -96,7 +96,7 @@
def t_default(self, s):
r" . +"
- raise ValueError, "unmatched input: %s" % `s`
+ raise ValueError("unmatched input: %s" % `s`)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -550,7 +550,7 @@
where the order is according to self.pycode.signature()."""
scope_len = len(scope_w)
if scope_len > self.pycode.co_nlocals:
- raise ValueError, "new fastscope is longer than the allocated area"
+ raise ValueError("new fastscope is longer than the allocated area")
# don't assign directly to 'locals_cells_stack_w[:scope_len]' to be
# virtualizable-friendly
for i in range(scope_len):
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1110,7 +1110,7 @@
return next_instr
def FOR_LOOP(self, oparg, next_instr):
- raise BytecodeCorruption, "old opcode, no longer in use"
+ raise BytecodeCorruption("old opcode, no longer in use")
def SETUP_LOOP(self, offsettoend, next_instr):
block = LoopBlock(self, next_instr + offsettoend, self.lastblock)
diff --git a/pypy/interpreter/test/test_exceptcomp.py b/pypy/interpreter/test/test_exceptcomp.py
--- a/pypy/interpreter/test/test_exceptcomp.py
+++ b/pypy/interpreter/test/test_exceptcomp.py
@@ -7,7 +7,7 @@
def test_exception(self):
try:
- raise TypeError, "nothing"
+ raise TypeError("nothing")
except TypeError:
pass
except:
@@ -15,7 +15,7 @@
def test_exceptionfail(self):
try:
- raise TypeError, "nothing"
+ raise TypeError("nothing")
except KeyError:
self.fail("Different exceptions match.")
except TypeError:
@@ -47,7 +47,7 @@
class UserExcept(Exception):
pass
try:
- raise UserExcept, "nothing"
+ raise UserExcept("nothing")
except UserExcept:
pass
except:
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -13,7 +13,7 @@
# XXX why is this called newstring?
import sys
def f():
- raise TypeError, "hello"
+ raise TypeError("hello")
def g():
f()
@@ -23,7 +23,7 @@
except:
typ,val,tb = sys.exc_info()
else:
- raise AssertionError, "should have raised"
+ raise AssertionError("should have raised")
assert hasattr(tb, 'tb_frame')
assert hasattr(tb, 'tb_lasti')
assert hasattr(tb, 'tb_lineno')
diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py
--- a/pypy/module/__builtin__/test/test_classobj.py
+++ b/pypy/module/__builtin__/test/test_classobj.py
@@ -688,7 +688,7 @@
def test_catch_attributeerror_of_descriptor(self):
def booh(self):
- raise this_exception, "booh"
+ raise this_exception("booh")
class E:
__eq__ = property(booh)
diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py
--- a/pypy/module/__builtin__/test/test_descriptor.py
+++ b/pypy/module/__builtin__/test/test_descriptor.py
@@ -322,7 +322,7 @@
except ZeroDivisionError:
pass
else:
- raise Exception, "expected ZeroDivisionError from bad property"
+ raise Exception("expected ZeroDivisionError from bad property")
def test_property_subclass(self):
class P(property):
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
--- a/pypy/module/imp/test/test_import.py
+++ b/pypy/module/imp/test/test_import.py
@@ -1135,7 +1135,7 @@
if fullname in self.namestoblock:
return self
def load_module(self, fullname):
- raise ImportError, "blocked"
+ raise ImportError("blocked")
import sys, imp
modname = "errno" # an arbitrary harmless builtin module
diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py
--- a/pypy/module/posix/app_posix.py
+++ b/pypy/module/posix/app_posix.py
@@ -199,10 +199,10 @@
g._childpid = childpid
return g
- except Exception, e:
+ except Exception as e:
try_close(write_end)
try_close(read_end)
- raise Exception, e # bare 'raise' does not work here :-(
+ raise e # bare 'raise' does not work here :-(
def wait():
""" wait() -> (pid, status)
diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py
--- a/pypy/module/signal/test/test_signal.py
+++ b/pypy/module/signal/test/test_signal.py
@@ -197,7 +197,7 @@
except OSError:
pass
else:
- raise AssertionError, "os.read(fd_read, 1) succeeded?"
+ raise AssertionError("os.read(fd_read, 1) succeeded?")
#
fd_read, fd_write = posix.pipe()
flags = fcntl.fcntl(fd_write, fcntl.F_GETFL, 0)
diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py
--- a/pypy/module/sys/app.py
+++ b/pypy/module/sys/app.py
@@ -58,7 +58,7 @@
# note that we cannot use SystemExit(exitcode) here.
# The comma version leads to an extra de-tupelizing
# in normalize_exception, which is exactly like CPython's.
- raise SystemExit, exitcode
+ raise SystemExit(exitcode)
def exitfunc():
"""Placeholder for sys.exitfunc(), which is called when PyPy exits."""
diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py
--- a/pypy/module/sys/test/test_sysmodule.py
+++ b/pypy/module/sys/test/test_sysmodule.py
@@ -83,7 +83,7 @@
etype, val, tb = sys.exc_info()
assert isinstance(val, etype)
else:
- raise AssertionError, "ZeroDivisionError not caught"
+ raise AssertionError("ZeroDivisionError not caught")
def test_io(self):
import sys
@@ -280,7 +280,7 @@
def clear():
try:
- raise ValueError, 42
+ raise ValueError(42)
except ValueError, exc:
clear_check(exc)
@@ -290,7 +290,7 @@
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
- raise ValueError, 13
+ raise ValueError(13)
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
@@ -314,9 +314,9 @@
except SystemExit, exc:
assert exc.code == 0
except:
- raise AssertionError, "wrong exception"
+ raise AssertionError("wrong exception")
else:
- raise AssertionError, "no exception"
+ raise AssertionError("no exception")
# call with tuple argument with one entry
# entry will be unpacked
@@ -325,9 +325,9 @@
except SystemExit, exc:
assert exc.code == 42
except:
- raise AssertionError, "wrong exception"
+ raise AssertionError("wrong exception")
else:
- raise AssertionError, "no exception"
+ raise AssertionError("no exception")
# call with integer argument
try:
@@ -335,9 +335,9 @@
except SystemExit, exc:
assert exc.code == 42
except:
- raise AssertionError, "wrong exception"
+ raise AssertionError("wrong exception")
else:
- raise AssertionError, "no exception"
+ raise AssertionError("no exception")
# call with string argument
try:
@@ -345,9 +345,9 @@
except SystemExit, exc:
assert exc.code == "exit"
except:
- raise AssertionError, "wrong exception"
+ raise AssertionError("wrong exception")
else:
- raise AssertionError, "no exception"
+ raise AssertionError("no exception")
# call with tuple argument with two entries
try:
@@ -355,9 +355,9 @@
except SystemExit, exc:
assert exc.code == (17, 23)
except:
- raise AssertionError, "wrong exception"
+ raise AssertionError("wrong exception")
else:
- raise AssertionError, "no exception"
+ raise AssertionError("no exception")
def test_getdefaultencoding(self):
import sys
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_callback_traceback.py b/pypy/module/test_lib_pypy/ctypes_tests/test_callback_traceback.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_callback_traceback.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_callback_traceback.py
@@ -5,7 +5,7 @@
def callback_func(arg):
42 / arg
- raise ValueError, arg
+ raise ValueError(arg)
class TestCallbackTraceback:
# When an exception is raised in a ctypes callback function, the C
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_cfuncs.py b/pypy/module/test_lib_pypy/ctypes_tests/test_cfuncs.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_cfuncs.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_cfuncs.py
@@ -190,7 +190,7 @@
class stdcall_dll(WinDLL):
def __getattr__(self, name):
if name[:2] == '__' and name[-2:] == '__':
- raise AttributeError, name
+ raise AttributeError(name)
func = self._FuncPtr(("s_" + name, self))
setattr(self, name, func)
return func
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py b/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py
@@ -513,7 +513,7 @@
assert ("Structure or union cannot contain itself" in
str(details))
else:
- raise AssertionError, "Structure or union cannot contain itself"
+ raise AssertionError("Structure or union cannot contain itself")
def test_vice_versa(self):
py.test.skip("mutually dependent lazily defined structures error semantics")
@@ -530,7 +530,7 @@
assert ("_fields_ is final" in
str(details))
else:
- raise AssertionError, "AttributeError not raised"
+ raise AssertionError("AttributeError not raised")
def test_nonfinal_struct(self):
class X(Structure):
@@ -558,7 +558,7 @@
_fields_ = [('x', c_int)]
def __getattr__(self, name):
- raise AttributeError, name
+ raise AttributeError(name)
x = X()
assert x.x == 0
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -893,4 +893,4 @@
elif _name not in ['is_', 'id','type','issubtype', 'int',
# not really to be defined in DescrOperation
'ord', 'unichr', 'unicode']:
- raise Exception, "missing def for operation %s" % _name
+ raise Exception("missing def for operation %s" % _name)
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -143,7 +143,7 @@
if x is None:
return self.w_None
if isinstance(x, OperationError):
- raise TypeError, ("attempt to wrap already wrapped exception: %s"%
+ raise TypeError("attempt to wrap already wrapped exception: %s"%
(x,))
if isinstance(x, int):
if isinstance(x, bool):
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -241,7 +241,7 @@
return super(WorkOnce, self).__new__(WorkOnce, name, bases, ns)
def mro(instance):
if instance.flag > 0:
- raise RuntimeError, "bozo"
+ raise RuntimeError("bozo")
else:
instance.flag += 1
return type.mro(instance)
diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py
--- a/pypy/objspace/test/test_descroperation.py
+++ b/pypy/objspace/test/test_descroperation.py
@@ -149,7 +149,7 @@
def __setslice__(self, start, stop, sequence):
ops.append((start, stop, sequence))
def __setitem__(self, key, value):
- raise AssertionError, key
+ raise AssertionError(key)
def __len__(self):
return 100
@@ -174,7 +174,7 @@
def __delslice__(self, start, stop):
ops.append((start, stop))
def __delitem__(self, key):
- raise AssertionError, key
+ raise AssertionError(key)
def __len__(self):
return 100
diff --git a/pypy/tool/importfun.py b/pypy/tool/importfun.py
--- a/pypy/tool/importfun.py
+++ b/pypy/tool/importfun.py
@@ -163,7 +163,7 @@
if name in opcode.opmap:
return opcode.opmap[name]
else:
- raise AttributeError, name
+ raise AttributeError(name)
_op_ = _Op()
diff --git a/pypy/tool/isolate.py b/pypy/tool/isolate.py
--- a/pypy/tool/isolate.py
+++ b/pypy/tool/isolate.py
@@ -50,7 +50,7 @@
if exc_type_module == 'exceptions':
raise getattr(exceptions, exc_type_name)
else:
- raise IsolateException, "%s.%s" % value
+ raise IsolateException("%s.%s" % value)
def _close(self):
if not self._closed:
diff --git a/pypy/tool/pydis.py b/pypy/tool/pydis.py
--- a/pypy/tool/pydis.py
+++ b/pypy/tool/pydis.py
@@ -96,8 +96,8 @@
for bytecode in self.bytecodes:
if bytecode.index == index:
return bytecode
- raise ValueError, "no bytecode found on index %s in code \n%s" % (
- index, pydis(self.code))
+ raise ValueError("no bytecode found on index %s in code \n%s" % (
+ index, pydis(self.code)))
def format(self):
lastlineno = -1
diff --git a/pypy/tool/pytest/test/test_pytestsupport.py b/pypy/tool/pytest/test/test_pytestsupport.py
--- a/pypy/tool/pytest/test/test_pytestsupport.py
+++ b/pypy/tool/pytest/test/test_pytestsupport.py
@@ -49,7 +49,7 @@
except AssertionError:
pass
else:
- raise AssertionError, "app level AssertionError mixup!"
+ raise AssertionError("app level AssertionError mixup!")
def app_test_exception_with_message():
try:
diff --git a/pypy/tool/rest/rst.py b/pypy/tool/rest/rst.py
--- a/pypy/tool/rest/rst.py
+++ b/pypy/tool/rest/rst.py
@@ -128,7 +128,7 @@
outcome = []
if (isinstance(self.children[0], Transition) or
isinstance(self.children[-1], Transition)):
- raise ValueError, ('document must not begin or end with a '
+ raise ValueError('document must not begin or end with a '
'transition')
for child in self.children:
outcome.append(child.text())
diff --git a/pypy/tool/test/isolate_simple.py b/pypy/tool/test/isolate_simple.py
--- a/pypy/tool/test/isolate_simple.py
+++ b/pypy/tool/test/isolate_simple.py
@@ -3,13 +3,13 @@
return a+b
def g():
- raise ValueError, "booh"
+ raise ValueError("booh")
class FancyException(Exception):
pass
def h():
- raise FancyException, "booh"
+ raise FancyException("booh")
def bomb():
raise KeyboardInterrupt
From pypy.commits at gmail.com Mon May 2 14:37:52 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 11:37:52 -0700 (PDT)
Subject: [pypy-commit] pypy default: revert wrong change in d1f09c46b8e7
Message-ID: <57279e80.8344c20a.c3eed.6145@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84140:7fb700345dee
Date: 2016-05-02 19:36 +0100
http://bitbucket.org/pypy/pypy/changeset/7fb700345dee/
Log: revert wrong change in d1f09c46b8e7
diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py
--- a/pypy/module/sys/app.py
+++ b/pypy/module/sys/app.py
@@ -58,7 +58,7 @@
# note that we cannot use SystemExit(exitcode) here.
# The comma version leads to an extra de-tupelizing
# in normalize_exception, which is exactly like CPython's.
- raise SystemExit(exitcode)
+ raise SystemExit, exitcode
def exitfunc():
"""Placeholder for sys.exitfunc(), which is called when PyPy exits."""
From pypy.commits at gmail.com Mon May 2 15:32:55 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 12:32:55 -0700 (PDT)
Subject: [pypy-commit] pypy default: Don't use deprecated raise statement
syntax
Message-ID: <5727ab67.876cc20a.6f1dd.71b8@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84141:a6cd96c17732
Date: 2016-05-02 20:32 +0100
http://bitbucket.org/pypy/pypy/changeset/a6cd96c17732/
Log: Don't use deprecated raise statement syntax
diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py
--- a/rpython/jit/backend/detect_cpu.py
+++ b/rpython/jit/backend/detect_cpu.py
@@ -35,7 +35,7 @@
if not getdefined(macro, ''):
continue
return k
- raise ProcessorAutodetectError, "Cannot detect processor using compiler macros"
+ raise ProcessorAutodetectError("Cannot detect processor using compiler macros")
def detect_model_from_host_platform():
@@ -52,7 +52,7 @@
# assume we have 'uname'
mach = os.popen('uname -m', 'r').read().strip()
if not mach:
- raise ProcessorAutodetectError, "cannot run 'uname -m'"
+ raise ProcessorAutodetectError("cannot run 'uname -m'")
#
result ={'i386': MODEL_X86,
'i486': MODEL_X86,
@@ -74,7 +74,7 @@
}.get(mach)
if result is None:
- raise ProcessorAutodetectError, "unknown machine name %s" % mach
+ raise ProcessorAutodetectError("unknown machine name %s" % mach)
#
if result.startswith('x86'):
from rpython.jit.backend.x86 import detect_feature as feature
@@ -128,7 +128,7 @@
elif backend_name == MODEL_S390_64:
return "rpython.jit.backend.zarch.runner", "CPU_S390_64"
else:
- raise ProcessorAutodetectError, (
+ raise ProcessorAutodetectError(
"we have no JIT backend for this cpu: '%s'" % backend_name)
def getcpuclass(backend_name="auto"):
diff --git a/rpython/jit/backend/ppc/form.py b/rpython/jit/backend/ppc/form.py
--- a/rpython/jit/backend/ppc/form.py
+++ b/rpython/jit/backend/ppc/form.py
@@ -48,7 +48,7 @@
def __call__(self, *args, **kw):
fieldvalues, sparefields = self.calc_fields(args, kw)
if sparefields:
- raise FormException, 'fields %s left'%sparefields
+ raise FormException('fields %s left'%sparefields)
self.assembler.insts.append(Instruction(fieldvalues))
@@ -72,7 +72,7 @@
self.boundtype = boundtype
for field in specializations:
if field not in fields:
- raise FormException, field
+ raise FormException(field)
def __get__(self, ob, cls=None):
if ob is None: return self
@@ -91,14 +91,14 @@
for fname, v in more_specializatons.iteritems():
field = self.fieldmap[fname]
if field not in self.fields:
- raise FormException, "don't know about '%s' here" % field
+ raise FormException("don't know about '%s' here" % field)
if isinstance(v, str):
ds[field] = self.fieldmap[v]
else:
ms[field] = v
s.update(ms)
if len(s) != len(self.specializations) + len(ms):
- raise FormException, "respecialization not currently allowed"
+ raise FormException("respecialization not currently allowed")
if ds:
fields = list(self.fields)
for field in ds:
@@ -175,8 +175,8 @@
overlap = True
for b in range(field.left, field.right+1):
if not overlap and b in bits:
- raise FormException, "'%s' and '%s' clash at bit '%s'"%(
- bits[b], fname, b)
+ raise FormException("'%s' and '%s' clash at bit '%s'"%(
+ bits[b], fname, b))
else:
bits[b] = fname
self.fields.append(field)
@@ -186,7 +186,7 @@
for fname in specializations:
field = self.fieldmap[fname]
if field not in self.fields:
- raise FormException, "no nothin bout '%s'"%fname
+ raise FormException("no nothin bout '%s'"%fname)
s[field] = specializations[fname]
return IDesc(self.fieldmap, self.fields, s)
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -2100,7 +2100,7 @@
guard_op = self.history.record(opnum, moreargs,
lltype.nullptr(llmemory.GCREF.TO))
else:
- guard_op = self.history.record(opnum, moreargs, None)
+ guard_op = self.history.record(opnum, moreargs, None)
self.capture_resumedata(resumepc)
# ^^^ records extra to history
self.staticdata.profiler.count_ops(opnum, Counters.GUARDS)
@@ -2254,7 +2254,7 @@
def execute_raised(self, exception, constant=False):
if isinstance(exception, jitexc.JitException):
- raise jitexc.JitException, exception # go through
+ raise exception # go through
llexception = jitexc.get_llexception(self.cpu, exception)
self.execute_ll_raised(llexception, constant)
diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py
--- a/rpython/jit/metainterp/warmspot.py
+++ b/rpython/jit/metainterp/warmspot.py
@@ -82,7 +82,7 @@
backendopt=False, trace_limit=sys.maxint, inline=False,
loop_longevity=0, retrace_limit=5, function_threshold=4,
disable_unrolling=sys.maxint,
- enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15,
+ enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15,
max_unroll_recursion=7, vec=1, vec_all=0, vec_cost=0,
vec_length=60, vec_ratio=2, vec_guard_ratio=3, **kwds):
from rpython.config.config import ConfigError
@@ -489,7 +489,7 @@
if opencoder_model == 'big':
self.metainterp_sd.opencoder_model = BigModel
else:
- self.metainterp_sd.opencoder_model = Model
+ self.metainterp_sd.opencoder_model = Model
self.stats.metainterp_sd = self.metainterp_sd
def make_virtualizable_infos(self):
@@ -934,7 +934,7 @@
raise LLException(ts.get_typeptr(value), value)
else:
value = cast_base_ptr_to_instance(Exception, value)
- raise Exception, value
+ raise value
def handle_jitexception(e):
# XXX the bulk of this function is mostly a copy-paste from above
@@ -968,7 +968,7 @@
raise LLException(ts.get_typeptr(value), value)
else:
value = cast_base_ptr_to_instance(Exception, value)
- raise Exception, value
+ raise value
jd._ll_portal_runner = ll_portal_runner # for debugging
jd.portal_runner_ptr = self.helper_func(jd._PTR_PORTAL_FUNCTYPE,
diff --git a/rpython/tool/frozenlist.py b/rpython/tool/frozenlist.py
--- a/rpython/tool/frozenlist.py
+++ b/rpython/tool/frozenlist.py
@@ -1,7 +1,7 @@
from rpython.tool.sourcetools import func_with_new_name
def forbid(*args):
- raise TypeError, "cannot mutate a frozenlist"
+ raise TypeError("cannot mutate a frozenlist")
class frozenlist(list):
__setitem__ = func_with_new_name(forbid, '__setitem__')
From pypy.commits at gmail.com Mon May 2 15:40:49 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 12:40:49 -0700 (PDT)
Subject: [pypy-commit] pypy default: Don't use deprecated except clause
syntax (rpython/)
Message-ID: <5727ad41.2472c20a.cee8.439b@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84142:287a8aa3fdd9
Date: 2016-05-02 20:40 +0100
http://bitbucket.org/pypy/pypy/changeset/287a8aa3fdd9/
Log: Don't use deprecated except clause syntax (rpython/)
diff too long, truncating to 2000 out of 3210 lines
diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py
--- a/rpython/annotator/annrpython.py
+++ b/rpython/annotator/annrpython.py
@@ -342,10 +342,10 @@
del self.blocked_blocks[block]
try:
self.flowin(graph, block)
- except BlockedInference, e:
+ except BlockedInference as e:
self.annotated[block] = False # failed, hopefully temporarily
self.blocked_blocks[block] = (graph, e.opindex)
- except Exception, e:
+ except Exception as e:
# hack for debug tools only
if not hasattr(e, '__annotator_block'):
setattr(e, '__annotator_block', block)
@@ -379,7 +379,7 @@
oldcells = [self.binding(a) for a in block.inputargs]
try:
unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)]
- except annmodel.UnionError, e:
+ except annmodel.UnionError as e:
# Add source code to the UnionError
e.source = '\n'.join(source_lines(graph, block, None, long=True))
raise
diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py
--- a/rpython/annotator/description.py
+++ b/rpython/annotator/description.py
@@ -278,7 +278,7 @@
defs_s.append(self.bookkeeper.immutablevalue(x))
try:
inputcells = args.match_signature(signature, defs_s)
- except ArgErr, e:
+ except ArgErr as e:
raise AnnotatorError("signature mismatch: %s() %s" %
(self.name, e.getmsg()))
return inputcells
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -902,7 +902,7 @@
def f(l):
try:
l[0]
- except (KeyError, IndexError),e:
+ except (KeyError, IndexError) as e:
return e
return None
diff --git a/rpython/bin/translatorshell.py b/rpython/bin/translatorshell.py
--- a/rpython/bin/translatorshell.py
+++ b/rpython/bin/translatorshell.py
@@ -61,7 +61,7 @@
if __name__ == '__main__':
try:
setup_readline()
- except ImportError, err:
+ except ImportError as err:
print "Disabling readline support (%s)" % err
from rpython.translator.test import snippet
from rpython.rtyper.rtyper import RPythonTyper
diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py
--- a/rpython/flowspace/model.py
+++ b/rpython/flowspace/model.py
@@ -677,7 +677,7 @@
assert len(allexitcases) == len(block.exits)
vars_previous_blocks.update(vars)
- except AssertionError, e:
+ except AssertionError as e:
# hack for debug tools only
#graph.show() # <== ENABLE THIS TO SEE THE BROKEN GRAPH
if block and not hasattr(e, '__annotator_block'):
diff --git a/rpython/jit/backend/arm/test/support.py b/rpython/jit/backend/arm/test/support.py
--- a/rpython/jit/backend/arm/test/support.py
+++ b/rpython/jit/backend/arm/test/support.py
@@ -67,7 +67,7 @@
func(*args, **kwargs)
try:
f_name = name[:name.index('_')]
- except ValueError, e:
+ except ValueError as e:
f_name = name
self.assert_equal('%s%s %s' % (f_name, asm_ext, asm))
return f
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py
--- a/rpython/jit/backend/llgraph/runner.py
+++ b/rpython/jit/backend/llgraph/runner.py
@@ -404,7 +404,7 @@
try:
frame.execute(lltrace)
assert False
- except ExecutionFinished, e:
+ except ExecutionFinished as e:
return e.deadframe
def get_value_direct(self, deadframe, tp, index):
@@ -1097,7 +1097,7 @@
execute = getattr(self, 'execute_' + op.getopname())
try:
resval = execute(_getdescr(op), *args)
- except Jump, j:
+ except Jump as j:
self.lltrace, i = j.jump_target
if i >= 0:
label_op = self.lltrace.operations[i]
@@ -1348,7 +1348,7 @@
try:
res = self.cpu.maybe_on_top_of_llinterp(func, call_args, TP.RESULT)
self.last_exception = None
- except LLException, lle:
+ except LLException as lle:
self.last_exception = lle
res = _example_res[getkind(TP.RESULT)[0]]
return res
@@ -1444,7 +1444,7 @@
assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish
try:
result = assembler_helper_ptr(pframe, vable)
- except LLException, lle:
+ except LLException as lle:
assert self.last_exception is None, "exception left behind"
self.last_exception = lle
# fish op
diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py
--- a/rpython/jit/backend/llsupport/llmodel.py
+++ b/rpython/jit/backend/llsupport/llmodel.py
@@ -144,7 +144,7 @@
# all other fields are empty
llop.gc_writebarrier(lltype.Void, new_frame)
return lltype.cast_opaque_ptr(llmemory.GCREF, new_frame)
- except Exception, e:
+ except Exception as e:
print "Unhandled exception", e, "in realloc_frame"
return lltype.nullptr(llmemory.GCREF.TO)
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -176,7 +176,7 @@
cls.cbuilder = compile(get_entry(allfuncs), cls.gc,
gcrootfinder=cls.gcrootfinder, jit=True,
thread=True)
- except ConfigError, e:
+ except ConfigError as e:
assert str(e).startswith('invalid value asmgcc')
py.test.skip('asmgcc not supported')
finally:
diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py
@@ -34,7 +34,7 @@
try:
rvmprof.register_code_object_class(MyCode, get_name)
- except rvmprof.VMProfPlatformUnsupported, e:
+ except rvmprof.VMProfPlatformUnsupported as e:
py.test.skip(str(e))
def get_unique_id(code):
diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py
--- a/rpython/jit/backend/llsupport/test/ztranslation_test.py
+++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py
@@ -288,7 +288,7 @@
def main(i):
try:
myportal(i)
- except ImDone, e:
+ except ImDone as e:
return e.resvalue
# XXX custom fishing, depends on the exact env var and format
@@ -297,7 +297,7 @@
try:
res = self.meta_interp(main, [400])
assert res == main(400)
- except ConfigError,e:
+ except ConfigError as e:
assert str(e).startswith('invalid value asmgcc')
py.test.skip('asmgcc not supported')
finally:
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -780,7 +780,7 @@
return [SpaceOperation('-live-', [], None),
SpaceOperation('getfield_vable_%s' % kind,
[v_inst, descr], op.result)]
- except VirtualizableArrayField, e:
+ except VirtualizableArrayField as e:
# xxx hack hack hack
vinfo = e.args[1]
arrayindex = vinfo.array_field_counter[op.args[1].value]
diff --git a/rpython/jit/codewriter/policy.py b/rpython/jit/codewriter/policy.py
--- a/rpython/jit/codewriter/policy.py
+++ b/rpython/jit/codewriter/policy.py
@@ -103,7 +103,7 @@
getkind(v.concretetype, supports_floats,
supports_longlong,
supports_singlefloats)
- except NotImplementedError, e:
+ except NotImplementedError as e:
log.WARNING('%s, ignoring graph' % (e,))
log.WARNING(' %s' % (graph,))
return True
diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py
--- a/rpython/jit/codewriter/test/test_flatten.py
+++ b/rpython/jit/codewriter/test/test_flatten.py
@@ -371,7 +371,7 @@
def f(i):
try:
g(i)
- except FooError, e:
+ except FooError as e:
return e.num
except Exception:
return 3
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -1363,7 +1363,7 @@
tr = Transformer()
try:
tr.rewrite_operation(op)
- except Exception, e:
+ except Exception as e:
assert 'foobar' in str(e)
def test_likely_unlikely():
diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py
--- a/rpython/jit/codewriter/test/test_regalloc.py
+++ b/rpython/jit/codewriter/test/test_regalloc.py
@@ -272,7 +272,7 @@
kref2 = bar(kref)
try:
return g(n)
- except FooError, e:
+ except FooError as e:
if foo(e):
return kref
else:
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -172,7 +172,7 @@
# call the method bhimpl_xxx()
try:
result = unboundmethod(*args)
- except Exception, e:
+ except Exception as e:
if verbose and not we_are_translated():
print '-> %s!' % (e.__class__.__name__,)
if resulttype == 'i' or resulttype == 'r' or resulttype == 'f':
@@ -323,7 +323,7 @@
break
except jitexc.JitException:
raise # go through
- except Exception, e:
+ except Exception as e:
lle = get_llexception(self.cpu, e)
self.handle_exception_in_frame(lle)
@@ -1540,9 +1540,9 @@
# we now proceed to interpret the bytecode in this frame
self.run()
#
- except jitexc.JitException, e:
+ except jitexc.JitException as e:
raise # go through
- except Exception, e:
+ except Exception as e:
# if we get an exception, return it to the caller frame
current_exc = get_llexception(self.cpu, e)
if not self.nextblackholeinterp:
@@ -1673,7 +1673,7 @@
# We have reached a recursive portal level.
try:
blackholeinterp._handle_jitexception_in_portal(exc)
- except Exception, e:
+ except Exception as e:
# It raised a general exception (it should not be a JitException here).
lle = get_llexception(blackholeinterp.cpu, e)
else:
diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py
--- a/rpython/jit/metainterp/executor.py
+++ b/rpython/jit/metainterp/executor.py
@@ -51,28 +51,28 @@
if rettype == INT:
try:
result = cpu.bh_call_i(func, args_i, args_r, args_f, descr)
- except Exception, e:
+ except Exception as e:
metainterp.execute_raised(e)
result = 0
return result
if rettype == REF:
try:
result = cpu.bh_call_r(func, args_i, args_r, args_f, descr)
- except Exception, e:
+ except Exception as e:
metainterp.execute_raised(e)
result = NULL
return result
if rettype == FLOAT:
try:
result = cpu.bh_call_f(func, args_i, args_r, args_f, descr)
- except Exception, e:
+ except Exception as e:
metainterp.execute_raised(e)
result = longlong.ZEROF
return result
if rettype == VOID:
try:
cpu.bh_call_v(func, args_i, args_r, args_f, descr)
- except Exception, e:
+ except Exception as e:
metainterp.execute_raised(e)
return None
raise AssertionError("bad rettype")
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py b/rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py
@@ -39,7 +39,7 @@
def raises(self, e, fn, *args):
try:
fn(*args)
- except Exception, e:
+ except Exception as e:
return e
opt = allopts[optnum]
diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py
--- a/rpython/jit/metainterp/optimizeopt/virtualstate.py
+++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py
@@ -91,7 +91,7 @@
state.renum[self.position] = other.position
try:
self._generate_guards(other, op, runtime_op, state)
- except VirtualStatesCantMatch, e:
+ except VirtualStatesCantMatch as e:
state.bad[self] = state.bad[other] = None
if e.state is None:
e.state = state
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -2034,7 +2034,7 @@
else:
try:
self.compile_done_with_this_frame(resultbox)
- except SwitchToBlackhole, stb:
+ except SwitchToBlackhole as stb:
self.aborted_tracing(stb.reason)
sd = self.staticdata
result_type = self.jitdriver_sd.result_type
@@ -2067,7 +2067,7 @@
self.popframe()
try:
self.compile_exit_frame_with_exception(self.last_exc_box)
- except SwitchToBlackhole, stb:
+ except SwitchToBlackhole as stb:
self.aborted_tracing(stb.reason)
raise jitexc.ExitFrameWithExceptionRef(self.cpu, lltype.cast_opaque_ptr(llmemory.GCREF, excvalue))
@@ -2367,7 +2367,7 @@
self.seen_loop_header_for_jdindex = -1
try:
self.interpret()
- except SwitchToBlackhole, stb:
+ except SwitchToBlackhole as stb:
self.run_blackhole_interp_to_cancel_tracing(stb)
assert False, "should always raise"
@@ -2404,7 +2404,7 @@
if self.resumekey_original_loop_token is None: # very rare case
raise SwitchToBlackhole(Counters.ABORT_BRIDGE)
self.interpret()
- except SwitchToBlackhole, stb:
+ except SwitchToBlackhole as stb:
self.run_blackhole_interp_to_cancel_tracing(stb)
assert False, "should always raise"
@@ -3276,7 +3276,7 @@
print '\tpyjitpl: %s(%s)' % (name, ', '.join(map(repr, args))),
try:
resultbox = unboundmethod(self, *args)
- except Exception, e:
+ except Exception as e:
if self.debug:
print '-> %s!' % e.__class__.__name__
raise
diff --git a/rpython/jit/metainterp/test/test_blackhole.py b/rpython/jit/metainterp/test/test_blackhole.py
--- a/rpython/jit/metainterp/test/test_blackhole.py
+++ b/rpython/jit/metainterp/test/test_blackhole.py
@@ -205,7 +205,7 @@
myjitdriver.jit_merge_point(x=x, y=y)
try:
choices(x)
- except FooError, e:
+ except FooError as e:
if e.num == 0:
break
y += e.num
diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py
--- a/rpython/jit/metainterp/test/test_compile.py
+++ b/rpython/jit/metainterp/test/test_compile.py
@@ -164,7 +164,7 @@
fail_descr = cpu.get_latest_descr(deadframe)
try:
fail_descr.handle_fail(deadframe, FakeMetaInterpSD(), None)
- except jitexc.ExitFrameWithExceptionRef, e:
+ except jitexc.ExitFrameWithExceptionRef as e:
assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), e.value) == llexc
else:
assert 0, "should have raised"
diff --git a/rpython/jit/metainterp/test/test_exception.py b/rpython/jit/metainterp/test/test_exception.py
--- a/rpython/jit/metainterp/test/test_exception.py
+++ b/rpython/jit/metainterp/test/test_exception.py
@@ -17,7 +17,7 @@
def f(n):
try:
return g(n)
- except MyError, e:
+ except MyError as e:
return e.n + 10
res = self.interp_operations(f, [9])
assert res == 8
@@ -141,7 +141,7 @@
try:
b(n)
return 0
- except MyError, e:
+ except MyError as e:
return e.n
def f(n):
return a(n)
@@ -161,7 +161,7 @@
myjitdriver.jit_merge_point(n=n)
try:
check(n, 0)
- except MyError, e:
+ except MyError as e:
n = check(e.n, 1)
return n
assert f(53) == -2
@@ -290,7 +290,7 @@
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
n = n - check(n)
- except MyError, e:
+ except MyError as e:
return e.n
assert f(53) == -2
res = self.meta_interp(f, [53], policy=StopAtXPolicy(check))
@@ -517,7 +517,7 @@
def f(n):
try:
portal(n)
- except SomeException, e:
+ except SomeException as e:
return 3
return 2
@@ -536,7 +536,7 @@
def main(n):
try:
f(n)
- except MyError, e:
+ except MyError as e:
return e.n
res = self.meta_interp(main, [41], repeat=7)
@@ -572,7 +572,7 @@
try:
f(n)
return 3
- except MyError, e:
+ except MyError as e:
return e.n
except ValueError:
return 8
@@ -590,7 +590,7 @@
def f(x):
try:
return g(x)
- except Exception, e:
+ except Exception as e:
if isinstance(e, OverflowError):
return -42
raise
diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py
--- a/rpython/jit/metainterp/test/test_recursive.py
+++ b/rpython/jit/metainterp/test/test_recursive.py
@@ -729,7 +729,7 @@
if codeno == 2:
try:
portal(1)
- except MyException, me:
+ except MyException as me:
i += me.x
i += 1
if codeno == 1:
@@ -1092,7 +1092,7 @@
if codeno < 10:
try:
portal(codeno + 5, k+1)
- except GotValue, e:
+ except GotValue as e:
i += e.result
codeno += 1
elif codeno == 10:
@@ -1106,7 +1106,7 @@
def main(codeno, k):
try:
portal(codeno, k)
- except GotValue, e:
+ except GotValue as e:
return e.result
assert main(0, 1) == 2095
diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py
--- a/rpython/jit/metainterp/test/test_virtualizable.py
+++ b/rpython/jit/metainterp/test/test_virtualizable.py
@@ -665,7 +665,7 @@
jitdriver.jit_merge_point(frame=frame)
try:
g()
- except FooError, e:
+ except FooError as e:
frame.x -= e.value
frame.y += 1
return frame.x
diff --git a/rpython/jit/metainterp/test/test_warmspot.py b/rpython/jit/metainterp/test/test_warmspot.py
--- a/rpython/jit/metainterp/test/test_warmspot.py
+++ b/rpython/jit/metainterp/test/test_warmspot.py
@@ -45,7 +45,7 @@
def main(a):
try:
interpreter_loop(a)
- except Exit, e:
+ except Exit as e:
return e.result
res = self.meta_interp(main, [1])
@@ -674,7 +674,7 @@
assert jd._assembler_call_helper(FakeDeadFrame(1), 0) == 10
try:
jd._assembler_call_helper(FakeDeadFrame(3), 0)
- except LLException, lle:
+ except LLException as lle:
assert lle[0] == self.exc_vtable
else:
py.test.fail("DID NOT RAISE")
diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py
--- a/rpython/jit/metainterp/warmspot.py
+++ b/rpython/jit/metainterp/warmspot.py
@@ -543,7 +543,7 @@
raise # go through
except StackOverflow:
raise # go through
- except Exception, e:
+ except Exception as e:
if not we_are_translated():
print "~~~ Crash in JIT!"
print '~~~ %s: %s' % (e.__class__, e)
@@ -908,7 +908,7 @@
# want to interrupt the whole interpreter loop.
return support.maybe_on_top_of_llinterp(rtyper,
portal_ptr)(*args)
- except jitexc.ContinueRunningNormally, e:
+ except jitexc.ContinueRunningNormally as e:
args = ()
for ARGTYPE, attrname, count in portalfunc_ARGS:
x = getattr(e, attrname)[count]
@@ -919,16 +919,16 @@
except jitexc.DoneWithThisFrameVoid:
assert result_kind == 'void'
return
- except jitexc.DoneWithThisFrameInt, e:
+ except jitexc.DoneWithThisFrameInt as e:
assert result_kind == 'int'
return specialize_value(RESULT, e.result)
- except jitexc.DoneWithThisFrameRef, e:
+ except jitexc.DoneWithThisFrameRef as e:
assert result_kind == 'ref'
return specialize_value(RESULT, e.result)
- except jitexc.DoneWithThisFrameFloat, e:
+ except jitexc.DoneWithThisFrameFloat as e:
assert result_kind == 'float'
return specialize_value(RESULT, e.result)
- except jitexc.ExitFrameWithExceptionRef, e:
+ except jitexc.ExitFrameWithExceptionRef as e:
value = ts.cast_to_baseclass(e.value)
if not we_are_translated():
raise LLException(ts.get_typeptr(value), value)
@@ -940,7 +940,7 @@
# XXX the bulk of this function is mostly a copy-paste from above
try:
raise e
- except jitexc.ContinueRunningNormally, e:
+ except jitexc.ContinueRunningNormally as e:
args = ()
for ARGTYPE, attrname, count in portalfunc_ARGS:
x = getattr(e, attrname)[count]
@@ -953,16 +953,16 @@
except jitexc.DoneWithThisFrameVoid:
assert result_kind == 'void'
return
- except jitexc.DoneWithThisFrameInt, e:
+ except jitexc.DoneWithThisFrameInt as e:
assert result_kind == 'int'
return e.result
- except jitexc.DoneWithThisFrameRef, e:
+ except jitexc.DoneWithThisFrameRef as e:
assert result_kind == 'ref'
return e.result
- except jitexc.DoneWithThisFrameFloat, e:
+ except jitexc.DoneWithThisFrameFloat as e:
assert result_kind == 'float'
return e.result
- except jitexc.ExitFrameWithExceptionRef, e:
+ except jitexc.ExitFrameWithExceptionRef as e:
value = ts.cast_to_baseclass(e.value)
if not we_are_translated():
raise LLException(ts.get_typeptr(value), value)
@@ -986,7 +986,7 @@
fail_descr = self.cpu.get_latest_descr(deadframe)
try:
fail_descr.handle_fail(deadframe, self.metainterp_sd, jd)
- except jitexc.JitException, e:
+ except jitexc.JitException as e:
return handle_jitexception(e)
else:
assert 0, "should have raised"
diff --git a/rpython/jit/tl/test/test_pypyjit.py b/rpython/jit/tl/test/test_pypyjit.py
--- a/rpython/jit/tl/test/test_pypyjit.py
+++ b/rpython/jit/tl/test/test_pypyjit.py
@@ -21,7 +21,7 @@
def check_crasher(func_name):
try:
JIT_EXECUTABLE.sysexec(CRASH_FILE, func_name)
- except py.process.cmdexec.Error, e:
+ except py.process.cmdexec.Error as e:
print "stderr"
print "------"
print e.err
diff --git a/rpython/memory/gctransform/support.py b/rpython/memory/gctransform/support.py
--- a/rpython/memory/gctransform/support.py
+++ b/rpython/memory/gctransform/support.py
@@ -80,7 +80,7 @@
def ll_call_destructor(destrptr, destr_v, typename):
try:
destrptr(destr_v)
- except Exception, e:
+ except Exception as e:
try:
write(2, "a destructor of type ")
write(2, typename)
diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py
--- a/rpython/memory/gctransform/transform.py
+++ b/rpython/memory/gctransform/transform.py
@@ -129,7 +129,7 @@
raise_analyzer,
cleanup=False)
must_constfold = True
- except inline.CannotInline, e:
+ except inline.CannotInline as e:
print 'CANNOT INLINE:', e
print '\t%s into %s' % (inline_graph, graph)
cleanup_graph(graph)
diff --git a/rpython/rlib/parsing/main.py b/rpython/rlib/parsing/main.py
--- a/rpython/rlib/parsing/main.py
+++ b/rpython/rlib/parsing/main.py
@@ -7,7 +7,7 @@
try:
t = py.path.local(filename).read(mode='U')
regexs, rules, ToAST = parse_ebnf(t)
- except ParseError, e:
+ except ParseError as e:
print e.nice_error_message(filename=filename, source=t)
raise
return make_parse_function(regexs, rules, eof=True)
diff --git a/rpython/rlib/parsing/makepackrat.py b/rpython/rlib/parsing/makepackrat.py
--- a/rpython/rlib/parsing/makepackrat.py
+++ b/rpython/rlib/parsing/makepackrat.py
@@ -632,7 +632,7 @@
p = PyPackratSyntaxParser(source)
try:
t = p.file()
- except BacktrackException, exc:
+ except BacktrackException as exc:
print exc.error.nice_error_message("", source)
lineno, _ = exc.error.get_line_column(source)
errorline = source.split("\n")[lineno]
diff --git a/rpython/rlib/parsing/pypackrat.py b/rpython/rlib/parsing/pypackrat.py
--- a/rpython/rlib/parsing/pypackrat.py
+++ b/rpython/rlib/parsing/pypackrat.py
@@ -29,7 +29,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -61,7 +61,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -93,7 +93,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -125,7 +125,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -167,14 +167,14 @@
_result = _call_status.result
_error = _call_status.error
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
try:
_result = self._regex299149370()
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
raise BacktrackException(_error)
@@ -197,7 +197,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -231,7 +231,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -265,7 +265,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -299,7 +299,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -360,7 +360,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -403,7 +403,7 @@
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -433,7 +433,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -480,7 +480,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -504,7 +504,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -551,7 +551,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -569,7 +569,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -586,7 +586,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all4.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
break
@@ -600,7 +600,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
@@ -623,7 +623,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -670,7 +670,7 @@
_result = _call_status.result
_error = _call_status.error
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -691,7 +691,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
@@ -705,14 +705,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all8.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
break
_result = _all8
_result = _before_discard5
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
@@ -730,7 +730,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
@@ -744,21 +744,21 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all12.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice13
break
_result = _all12
_result = (Nonterminal('productionargs', args + [arg]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice14 = self._pos
try:
_result = (Nonterminal('productionargs', []))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice14
raise BacktrackException(_error)
@@ -781,7 +781,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -833,7 +833,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
@@ -856,14 +856,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all7.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice8
break
_result = _all7
_result = _before_discard6
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
break
@@ -875,7 +875,7 @@
last = _result
_result = (Nonterminal('or', l + [last]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice9 = self._pos
@@ -884,7 +884,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
raise BacktrackException(_error)
@@ -909,7 +909,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -976,7 +976,7 @@
_error = self._combine_errors(_error, _call_status.error)
_result = _before_discard4
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -984,7 +984,7 @@
cmds = _result
_result = (Nonterminal('commands', [cmd] + cmds))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice5 = self._pos
@@ -993,7 +993,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
raise BacktrackException(_error)
@@ -1018,7 +1018,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1073,7 +1073,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1115,7 +1115,7 @@
_result = _call_status.result
_error = _call_status.error
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
@@ -1124,7 +1124,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
_choice2 = self._pos
@@ -1133,7 +1133,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
_choice3 = self._pos
@@ -1142,7 +1142,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
_choice4 = self._pos
@@ -1151,7 +1151,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
_choice5 = self._pos
@@ -1160,7 +1160,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
raise BacktrackException(_error)
@@ -1185,7 +1185,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1229,7 +1229,7 @@
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -1246,7 +1246,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -1269,7 +1269,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1323,7 +1323,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -1337,7 +1337,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
@@ -1354,14 +1354,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all5.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice6
break
_result = _all5
_result = (Nonterminal('if', [cmd, condition]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice7 = self._pos
@@ -1375,7 +1375,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all8.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
break
@@ -1392,14 +1392,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
_result = _all10
_result = (Nonterminal('if', [condition]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
raise BacktrackException(_error)
@@ -1412,7 +1412,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all12.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice13
break
@@ -1429,7 +1429,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all14.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice15
break
@@ -1453,7 +1453,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1497,7 +1497,7 @@
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -1514,7 +1514,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -1528,7 +1528,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all4.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
break
@@ -1545,7 +1545,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
@@ -1572,7 +1572,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1619,7 +1619,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -1643,7 +1643,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1690,7 +1690,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -1704,7 +1704,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -1731,7 +1731,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1781,7 +1781,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -1795,14 +1795,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
_result = _all3
_result = (Nonterminal('maybe', [what]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice5 = self._pos
@@ -1819,7 +1819,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
@@ -1829,14 +1829,14 @@
try:
_result = self.__chars__('*')
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice8
_choice9 = self._pos
try:
_result = self.__chars__('+')
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
raise BacktrackException(_error)
@@ -1851,14 +1851,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
_result = _all10
_result = (Nonterminal('repetition', [repetition, what]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
raise BacktrackException(_error)
@@ -1874,7 +1874,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all12.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice13
break
@@ -1884,14 +1884,14 @@
try:
_result = self.__chars__('*')
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice14
_choice15 = self._pos
try:
_result = self.__chars__('+')
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice15
raise BacktrackException(_error)
@@ -1906,7 +1906,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all16.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice17
break
@@ -1930,7 +1930,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1977,7 +1977,7 @@
_result = _call_status.result
_error = _call_status.error
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -1994,14 +1994,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
_result = _all3
_result = (Nonterminal('negation', [what]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice5 = self._pos
@@ -2010,7 +2010,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
raise BacktrackException(_error)
@@ -2035,7 +2035,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -2082,7 +2082,7 @@
_result = _call_status.result
_error = _call_status.error
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -2099,7 +2099,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
@@ -2113,14 +2113,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all5.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice6
break
_result = _all5
_result = (Nonterminal('exclusive', [what]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice7 = self._pos
@@ -2134,7 +2134,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all8.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
break
@@ -2151,7 +2151,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
@@ -2165,14 +2165,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all12.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice13
break
_result = _all12
_result = (Nonterminal('ignore', [what]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
_choice14 = self._pos
@@ -2187,7 +2187,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all16.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice17
break
@@ -2206,14 +2206,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all19.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice20
break
_result = _all19
_result = _before_discard18
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice14
_choice21 = self._pos
@@ -2222,7 +2222,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice21
raise BacktrackException(_error)
@@ -2247,7 +2247,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -2289,7 +2289,7 @@
_result = _call_status.result
_error = _call_status.error
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
@@ -2306,14 +2306,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
_result = _all3
_result = _before_discard2
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
_choice5 = self._pos
@@ -2330,14 +2330,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all7.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice8
break
_result = _all7
_result = _before_discard6
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
raise BacktrackException(_error)
@@ -2353,7 +2353,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
@@ -2377,7 +2377,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -2428,7 +2428,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -2451,7 +2451,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -2498,7 +2498,7 @@
_result = _call_status.result
_error = _call_status.error
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -2519,7 +2519,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
@@ -2533,14 +2533,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all8.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
break
_result = _all8
_result = _before_discard5
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
@@ -2559,21 +2559,21 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
_result = _all10
_result = (Nonterminal("args", args + [last]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice12 = self._pos
try:
_result = (Nonterminal("args", []))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice12
raise BacktrackException(_error)
@@ -2596,7 +2596,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
diff --git a/rpython/rlib/parsing/regexparse.py b/rpython/rlib/parsing/regexparse.py
--- a/rpython/rlib/parsing/regexparse.py
+++ b/rpython/rlib/parsing/regexparse.py
@@ -299,7 +299,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -359,7 +359,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -408,7 +408,7 @@
r2 = _result
_result = (r1 | r2)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
@@ -417,7 +417,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
raise BacktrackException(_error)
@@ -442,7 +442,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -485,7 +485,7 @@
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -509,7 +509,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -554,7 +554,7 @@
_result = self.__chars__('*')
_result = (r1.kleene())
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
@@ -566,7 +566,7 @@
_result = self.__chars__('+')
_result = (r1 + r1.kleene())
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
_choice2 = self._pos
@@ -578,7 +578,7 @@
_result = self.__chars__('?')
_result = (regex.StringExpression("") | r1)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
_choice3 = self._pos
@@ -595,7 +595,7 @@
_result = self.__chars__('}')
_result = (r1 * n + r1.kleene())
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
_choice4 = self._pos
@@ -612,7 +612,7 @@
_result = self.__chars__('}')
_result = (r1 * n[0] + reduce(operator.or_, [r1 * i for i in range(n[1] - n[0] + 1)], regex.StringExpression("")))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
_choice5 = self._pos
@@ -620,7 +620,7 @@
_result = self.__chars__('{')
_result = (regex.StringExpression("{"))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
_choice6 = self._pos
@@ -629,7 +629,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice6
raise BacktrackException(_error)
@@ -654,7 +654,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -702,7 +702,7 @@
_result = self.__chars__(')')
_result = _before_discard2
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice3 = self._pos
@@ -711,7 +711,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
_choice4 = self._pos
@@ -722,7 +722,7 @@
cc = _result
_result = (reduce(operator.or_, [regex.RangeExpression(a, chr(ord(a) + b - 1)) for a, b in compress_char_set(cc)]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
_choice5 = self._pos
@@ -733,7 +733,7 @@
c = _result
_result = (regex.StringExpression(c))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
_choice6 = self._pos
@@ -741,7 +741,7 @@
_result = self.__chars__('.')
_result = (regex.RangeExpression(chr(0), chr(255)))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice6
_choice7 = self._pos
@@ -749,7 +749,7 @@
_result = self.__chars__('-')
_result = (regex.StringExpression('-'))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
_choice8 = self._pos
@@ -757,7 +757,7 @@
_result = self.__chars__('\\')
_result = (regex.StringExpression('\\'))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice8
_choice9 = self._pos
@@ -765,7 +765,7 @@
_result = self.__chars__(']')
_result = (regex.StringExpression(']'))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
raise BacktrackException(_error)
@@ -789,7 +789,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -833,7 +833,7 @@
c = _result
_result = (unescape(c))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
@@ -844,7 +844,7 @@
c = _result
_result = (c)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
raise BacktrackException(_error)
@@ -871,7 +871,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -903,7 +903,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -935,7 +935,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
From pypy.commits at gmail.com Mon May 2 15:52:05 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 12:52:05 -0700 (PDT)
Subject: [pypy-commit] pypy default: Don't use deprecated except clause
syntax (pypy/)
Message-ID: <5727afe5.508e1c0a.54893.ffff8f98@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84143:a96d4c97f8a1
Date: 2016-05-02 20:51 +0100
http://bitbucket.org/pypy/pypy/changeset/a96d4c97f8a1/
Log: Don't use deprecated except clause syntax (pypy/)
diff too long, truncating to 2000 out of 6227 lines
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -63,7 +63,7 @@
## from pypy.interpreter import main, interactive, error
## con = interactive.PyPyConsole(space)
## con.interact()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -71,7 +71,7 @@
finally:
try:
space.finish()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -115,7 +115,7 @@
space.wrap('__import__'))
space.call_function(import_, space.wrap('site'))
return rffi.cast(rffi.INT, 0)
- except OperationError, e:
+ except OperationError as e:
if verbose:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
@@ -167,7 +167,7 @@
sys._pypy_execute_source.append(glob)
exec stmt in glob
""")
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -84,7 +84,7 @@
space = self.space
try:
args_w = space.fixedview(w_stararg)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"argument after * must be a sequence, not %T",
@@ -111,7 +111,7 @@
else:
try:
w_keys = space.call_method(w_starstararg, "keys")
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
raise oefmt(space.w_TypeError,
"argument after ** must be a mapping, not %T",
@@ -279,7 +279,7 @@
try:
self._match_signature(w_firstarg,
scope_w, signature, defaults_w, 0)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
return signature.scope_length()
@@ -301,7 +301,7 @@
"""
try:
return self._parse(w_firstarg, signature, defaults_w, blindargs)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
@staticmethod
@@ -352,7 +352,7 @@
for w_key in keys_w:
try:
key = space.str_w(w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise OperationError(
space.w_TypeError,
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -115,16 +115,16 @@
def check_forbidden_name(self, name, node):
try:
misc.check_forbidden_name(name)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error("cannot assign to %s" % (e.name,), node)
def set_context(self, expr, ctx):
"""Set the context of an expression to Store or Del if possible."""
try:
expr.set_context(ctx)
- except ast.UnacceptableExpressionContext, e:
+ except ast.UnacceptableExpressionContext as e:
self.error_ast(e.msg, e.node)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_print_stmt(self, print_node):
@@ -1080,7 +1080,7 @@
return self.space.call_function(tp, w_num_str)
try:
return self.space.call_function(self.space.w_int, w_num_str, w_base)
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(self.space, self.space.w_ValueError):
raise
return self.space.call_function(self.space.w_float, w_num_str)
@@ -1100,7 +1100,7 @@
sub_strings_w = [parsestring.parsestr(space, encoding, atom_node.get_child(i).get_value(),
unicode_literals)
for i in range(atom_node.num_children())]
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(space, space.w_UnicodeError):
raise
# UnicodeError in literal: turn into SyntaxError
diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py
--- a/pypy/interpreter/astcompiler/symtable.py
+++ b/pypy/interpreter/astcompiler/symtable.py
@@ -325,7 +325,7 @@
try:
module.walkabout(self)
top.finalize(None, {}, {})
- except SyntaxError, e:
+ except SyntaxError as e:
e.filename = compile_info.filename
raise
self.pop_scope()
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -705,7 +705,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
@@ -717,7 +717,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
@@ -969,7 +969,7 @@
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
- except AssertionError, e:
+ except AssertionError as e:
assert str(e) == "(3,)"
# BUILD_LIST_FROM_ARG is PyPy specific
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -377,7 +377,7 @@
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
- except ASDLSyntaxError, err:
+ except ASDLSyntaxError as err:
print err
lines = buf.split("\n")
print lines[err.lineno - 1] # lines starts at 0, files at 1
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -52,7 +52,7 @@
try:
space.delitem(w_dict, space.wrap(attr))
return True
- except OperationError, ex:
+ except OperationError as ex:
if not ex.match(space, space.w_KeyError):
raise
return False
@@ -77,7 +77,7 @@
def getname(self, space):
try:
return space.str_w(space.getattr(self, space.wrap('__name__')))
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError):
return '?'
raise
@@ -318,7 +318,7 @@
space = self.space
try:
return space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise StopIteration
@@ -406,7 +406,7 @@
self.sys.get('builtin_module_names')):
try:
w_mod = self.getitem(w_modules, w_modname)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
continue
raise
@@ -440,7 +440,7 @@
try:
self.call_method(w_mod, "_shutdown")
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self, "threading._shutdown()")
def __repr__(self):
@@ -476,7 +476,7 @@
assert reuse
try:
return self.getitem(w_modules, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_KeyError):
raise
@@ -764,7 +764,7 @@
def finditem(self, w_obj, w_key):
try:
return self.getitem(w_obj, w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
return None
raise
@@ -772,7 +772,7 @@
def findattr(self, w_object, w_name):
try:
return self.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
# a PyPy extension: let SystemExit and KeyboardInterrupt go through
if e.async(self):
raise
@@ -872,7 +872,7 @@
items=items)
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -896,7 +896,7 @@
while True:
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -942,7 +942,7 @@
"""
try:
return self.len_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -952,7 +952,7 @@
return default
try:
w_hint = self.get_and_call_function(w_descr, w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -1049,7 +1049,7 @@
else:
return False
return self.exception_issubclass_w(w_exc_type, w_check_class)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_TypeError): # string exceptions maybe
return False
raise
@@ -1167,7 +1167,7 @@
try:
self.getattr(w_obj, self.wrap("__call__"))
return self.w_True
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_AttributeError):
raise
return self.w_False
@@ -1287,7 +1287,7 @@
def _next_or_none(self, w_it):
try:
return self.next(w_it)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
return None
@@ -1365,7 +1365,7 @@
"""
try:
w_index = self.index(w_obj)
- except OperationError, err:
+ except OperationError as err:
if objdescr is None or not err.match(self, self.w_TypeError):
raise
raise oefmt(self.w_TypeError, "%s must be an integer, not %T",
@@ -1375,7 +1375,7 @@
# return type of __index__ is already checked by space.index(),
# but there is no reason to allow conversions anyway
index = self.int_w(w_index, allow_conversion=False)
- except OperationError, err:
+ except OperationError as err:
if not err.match(self, self.w_OverflowError):
raise
if not w_exception:
@@ -1526,7 +1526,7 @@
# the unicode buffer.)
try:
return self.str_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_TypeError):
raise
try:
@@ -1705,7 +1705,7 @@
# instead of raising OverflowError. For obscure cases only.
try:
return self.int_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import intmask
@@ -1716,7 +1716,7 @@
# instead of raising OverflowError.
try:
return self.r_longlong_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import longlongmask
@@ -1731,7 +1731,7 @@
not self.isinstance_w(w_fd, self.w_long)):
try:
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_AttributeError):
raise OperationError(self.w_TypeError,
self.wrap("argument must be an int, or have a fileno() "
@@ -1746,7 +1746,7 @@
)
try:
fd = self.c_int_w(w_fd)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_OverflowError):
fd = -1
else:
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -563,7 +563,7 @@
while pending is not None:
try:
pending.callback(pending.w_obj)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(space, pending.descrname, pending.w_obj)
e.clear(space) # break up reference cycles
pending = pending.next
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -540,7 +540,7 @@
try:
return space.call_method(space.w_object, '__getattribute__',
space.wrap(self), w_attr)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# fall-back to the attribute of the underlying 'im_func'
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -686,7 +686,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -703,7 +703,7 @@
space.w_None)
except MemoryError:
raise OperationError(space.w_MemoryError, space.w_None)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
rstackovf.check_stack_overflow()
raise OperationError(space.w_RuntimeError,
space.wrap("maximum recursion depth exceeded"))
@@ -725,7 +725,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -746,7 +746,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args.prepend(w_obj))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -764,7 +764,7 @@
except DescrMismatch:
raise OperationError(space.w_SystemError,
space.wrap("unexpected DescrMismatch error"))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -784,7 +784,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -804,7 +804,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -824,7 +824,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2, w3]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -845,7 +845,7 @@
self.descr_reqcls,
Arguments(space,
[w1, w2, w3, w4]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -144,7 +144,7 @@
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
space.w_None)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration) or \
e.match(space, space.w_GeneratorExit):
return space.w_None
@@ -197,7 +197,7 @@
results=results, pycode=pycode)
try:
w_result = frame.execute_frame(space.w_None)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py
--- a/pypy/interpreter/main.py
+++ b/pypy/interpreter/main.py
@@ -8,7 +8,7 @@
w_modules = space.sys.get('modules')
try:
return space.getitem(w_modules, w_main)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
mainmodule = module.Module(space, w_main)
@@ -52,7 +52,7 @@
else:
return
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.record_interpreter_traceback()
raise
@@ -110,7 +110,7 @@
try:
w_stdout = space.sys.get('stdout')
w_softspace = space.getattr(w_stdout, space.wrap('softspace'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# Don't crash if user defined stdout doesn't have softspace
@@ -118,7 +118,7 @@
if space.is_true(w_softspace):
space.call_method(w_stdout, 'write', space.wrap('\n'))
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.normalize_exception(space)
w_type = operationerr.w_type
w_value = operationerr.get_w_value(space)
@@ -162,7 +162,7 @@
space.call_function(w_hook, w_type, w_value, w_traceback)
return False # done
- except OperationError, err2:
+ except OperationError as err2:
# XXX should we go through sys.get('stderr') ?
print >> sys.stderr, 'Error calling sys.excepthook:'
err2.print_application_traceback(space)
diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py
--- a/pypy/interpreter/mixedmodule.py
+++ b/pypy/interpreter/mixedmodule.py
@@ -169,7 +169,7 @@
while 1:
try:
value = eval(spec, d)
- except NameError, ex:
+ except NameError as ex:
name = ex.args[0].split("'")[1] # super-Evil
if name in d:
raise # propagate the NameError
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -110,7 +110,7 @@
if code_hook is not None:
try:
self.space.call_function(code_hook, self)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self.space, "new_code_hook()")
def _initialize(self):
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -55,21 +55,21 @@
try:
code = self.compile(source, filename, mode, flags)
return code # success
- except OperationError, err:
+ except OperationError as err:
if not err.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n", filename, mode, flags)
return None # expect more
- except OperationError, err1:
+ except OperationError as err1:
if not err1.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n\n", filename, mode, flags)
raise # uh? no error with \n\n. re-raise the previous error
- except OperationError, err2:
+ except OperationError as err2:
if not err2.match(space, space.w_SyntaxError):
raise
@@ -131,7 +131,7 @@
try:
mod = optimize.optimize_ast(space, node, info)
code = codegen.compile_ast(space, mod, info)
- except parseerror.SyntaxError, e:
+ except parseerror.SyntaxError as e:
raise OperationError(space.w_SyntaxError,
e.wrap_info(space))
return code
@@ -145,10 +145,10 @@
try:
parse_tree = self.parser.parse_source(source, info)
mod = astbuilder.ast_from_node(space, parse_tree, info)
- except parseerror.IndentationError, e:
+ except parseerror.IndentationError as e:
raise OperationError(space.w_IndentationError,
e.wrap_info(space))
- except parseerror.SyntaxError, e:
+ except parseerror.SyntaxError as e:
raise OperationError(space.w_SyntaxError,
e.wrap_info(space))
return mod
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -67,9 +67,9 @@
def handle_bytecode(self, co_code, next_instr, ec):
try:
next_instr = self.dispatch_bytecode(co_code, next_instr, ec)
- except OperationError, operr:
+ except OperationError as operr:
next_instr = self.handle_operation_error(ec, operr)
- except RaiseWithExplicitTraceback, e:
+ except RaiseWithExplicitTraceback as e:
next_instr = self.handle_operation_error(ec, e.operr,
attach_tb=False)
except KeyboardInterrupt:
@@ -78,7 +78,7 @@
except MemoryError:
next_instr = self.handle_asynchronous_error(ec,
self.space.w_MemoryError)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
# Note that this case catches AttributeError!
rstackovf.check_stack_overflow()
next_instr = self.handle_asynchronous_error(ec,
@@ -117,7 +117,7 @@
finally:
if trace is not None:
self.getorcreatedebug().w_f_trace = trace
- except OperationError, e:
+ except OperationError as e:
operr = e
pytraceback.record_application_traceback(
self.space, operr, self, self.last_instr)
@@ -844,7 +844,7 @@
w_varname = self.getname_w(varindex)
try:
self.space.delitem(self.getorcreatedebug().w_locals, w_varname)
- except OperationError, e:
+ except OperationError as e:
# catch KeyErrors and turn them into NameErrors
if not e.match(self.space, self.space.w_KeyError):
raise
@@ -1003,7 +1003,7 @@
try:
if space.int_w(w_flag) == -1:
w_flag = None
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
@@ -1040,7 +1040,7 @@
w_module = self.peekvalue()
try:
w_obj = self.space.getattr(w_module, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_AttributeError):
raise
raise oefmt(self.space.w_ImportError,
@@ -1099,7 +1099,7 @@
w_iterator = self.peekvalue()
try:
w_nextitem = self.space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_StopIteration):
raise
# iterator exhausted
diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -118,7 +118,7 @@
if enc is not None and enc not in ('utf-8', 'iso-8859-1'):
try:
textsrc = recode_to_utf8(self.space, textsrc, enc)
- except OperationError, e:
+ except OperationError as e:
# if the codec is not found, LookupError is raised. we
# check using 'is_w' not to mask potential IndexError or
# KeyError
@@ -164,10 +164,10 @@
for tp, value, lineno, column, line in tokens:
if self.add_token(tp, value, lineno, column, line):
break
- except error.TokenError, e:
+ except error.TokenError as e:
e.filename = compile_info.filename
raise
- except parser.ParseError, e:
+ except parser.ParseError as e:
# Catch parse errors, pretty them up and reraise them as a
# SyntaxError.
new_err = error.IndentationError
diff --git a/pypy/interpreter/pyparser/test/unittest_samples.py b/pypy/interpreter/pyparser/test/unittest_samples.py
--- a/pypy/interpreter/pyparser/test/unittest_samples.py
+++ b/pypy/interpreter/pyparser/test/unittest_samples.py
@@ -66,7 +66,7 @@
print
try:
assert_tuples_equal(pypy_tuples, python_tuples)
- except AssertionError,e:
+ except AssertionError as e:
error_path = e.args[-1]
print "ERROR PATH =", error_path
print "="*80
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -224,7 +224,7 @@
def _spawn(self, *args, **kwds):
try:
import pexpect
- except ImportError, e:
+ except ImportError as e:
py.test.skip(str(e))
else:
# Version is of the style "0.999" or "2.1". Older versions of
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -618,14 +618,14 @@
space = self.space
try:
Arguments(space, [], w_stararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after * must be a sequence, not int"
else:
assert 0, "did not raise"
try:
Arguments(space, [], w_starstararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after ** must be a mapping, not int"
else:
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -696,7 +696,7 @@
""")
try:
self.compiler.compile(str(source), '', 'exec', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -706,7 +706,7 @@
code = 'def f(): (yield bar) += y'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -716,7 +716,7 @@
code = 'dict(a = i for i in xrange(10))'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -1011,7 +1011,7 @@
"""
try:
exec source
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unindent does not match any outer indentation level'
else:
raise Exception("DID NOT RAISE")
@@ -1021,13 +1021,13 @@
source2 = "x = (\n\n"
try:
exec source1
- except SyntaxError, err1:
+ except SyntaxError as err1:
pass
else:
raise Exception("DID NOT RAISE")
try:
exec source2
- except SyntaxError, err2:
+ except SyntaxError as err2:
pass
else:
raise Exception("DID NOT RAISE")
diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
--- a/pypy/interpreter/test/test_exec.py
+++ b/pypy/interpreter/test/test_exec.py
@@ -196,11 +196,11 @@
def test_filename(self):
try:
exec "'unmatched_quote"
- except SyntaxError, msg:
+ except SyntaxError as msg:
assert msg.filename == ''
try:
eval("'unmatched_quote")
- except SyntaxError, msg:
+ except SyntaxError as msg:
assert msg.filename == ''
def test_exec_and_name_lookups(self):
@@ -213,7 +213,7 @@
try:
res = f()
- except NameError, e: # keep py.test from exploding confused
+ except NameError as e: # keep py.test from exploding confused
raise e
assert res == 1
diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py
--- a/pypy/interpreter/test/test_function.py
+++ b/pypy/interpreter/test/test_function.py
@@ -296,14 +296,14 @@
def test_call_error_message(self):
try:
len()
- except TypeError, e:
+ except TypeError as e:
assert "len() takes exactly 1 argument (0 given)" in e.message
else:
assert 0, "did not raise"
try:
len(1, 2)
- except TypeError, e:
+ except TypeError as e:
assert "len() takes exactly 1 argument (2 given)" in e.message
else:
assert 0, "did not raise"
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -26,7 +26,7 @@
wrappedfunc = space.getitem(w_glob, w(functionname))
try:
w_output = space.call_function(wrappedfunc, *wrappedargs)
- except error.OperationError, e:
+ except error.OperationError as e:
#e.print_detailed_traceback(space)
return '<<<%s>>>' % e.errorstr(space)
else:
@@ -331,7 +331,7 @@
def f(): f()
try:
f()
- except RuntimeError, e:
+ except RuntimeError as e:
assert str(e) == "maximum recursion depth exceeded"
else:
assert 0, "should have raised!"
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -86,7 +86,7 @@
""")
try:
space.unpackiterable(w_a)
- except OperationError, o:
+ except OperationError as o:
if not o.match(space, space.w_ZeroDivisionError):
raise Exception("DID NOT RAISE")
else:
@@ -237,7 +237,7 @@
self.space.getindex_w, w_instance2, self.space.w_IndexError)
try:
self.space.getindex_w(self.space.w_tuple, None, "foobar")
- except OperationError, e:
+ except OperationError as e:
assert e.match(self.space, self.space.w_TypeError)
assert "foobar" in e.errorstr(self.space)
else:
diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py
--- a/pypy/interpreter/test/test_pyframe.py
+++ b/pypy/interpreter/test/test_pyframe.py
@@ -376,7 +376,7 @@
def g():
try:
raise Exception
- except Exception, e:
+ except Exception as e:
import sys
raise Exception, e, sys.exc_info()[2]
diff --git a/pypy/interpreter/test/test_raise.py b/pypy/interpreter/test/test_raise.py
--- a/pypy/interpreter/test/test_raise.py
+++ b/pypy/interpreter/test/test_raise.py
@@ -18,34 +18,34 @@
def test_1arg(self):
try:
raise SystemError, 1
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
def test_2args(self):
try:
raise SystemError, (1, 2)
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_instancearg(self):
try:
raise SystemError, SystemError(1, 2)
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_more_precise_instancearg(self):
try:
raise Exception, SystemError(1, 2)
- except SystemError, e:
+ except SystemError as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_builtin_exc(self):
try:
[][0]
- except IndexError, e:
+ except IndexError as e:
assert isinstance(e, IndexError)
def test_raise_cls(self):
@@ -194,7 +194,7 @@
raise Sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a.__class__ is Sub
sub = Sub()
@@ -202,14 +202,14 @@
raise sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a is sub
try:
raise A, sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a is sub
assert sub.val is None
@@ -217,13 +217,13 @@
raise Sub, 42
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a.__class__ is Sub
assert a.val == 42
try:
{}[5]
- except A, a:
+ except A as a:
assert 0
except KeyError:
pass
diff --git a/pypy/interpreter/test/test_syntax.py b/pypy/interpreter/test/test_syntax.py
--- a/pypy/interpreter/test/test_syntax.py
+++ b/pypy/interpreter/test/test_syntax.py
@@ -254,7 +254,7 @@
space.wrap(s),
space.wrap('?'),
space.wrap('exec'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_SyntaxError):
raise
else:
@@ -723,7 +723,7 @@
line4 = "if ?: pass\n"
try:
exec "print\nprint\nprint\n" + line4
- except SyntaxError, e:
+ except SyntaxError as e:
assert e.lineno == 4
assert e.text == line4
assert e.offset == e.text.index('?') + 1
@@ -738,7 +738,7 @@
a b c d e
bar
"""
- except SyntaxError, e:
+ except SyntaxError as e:
assert e.lineno == 4
assert e.text.endswith('a b c d e\n')
assert e.offset == e.text.index('b')
@@ -749,7 +749,7 @@
program = "(1, 2) += (3, 4)\n"
try:
exec program
- except SyntaxError, e:
+ except SyntaxError as e:
assert e.lineno == 1
assert e.text is None
else:
@@ -769,7 +769,7 @@
for s in VALID:
try:
compile(s, '?', 'exec')
- except Exception, e:
+ except Exception as e:
print '-'*20, 'FAILED TO COMPILE:', '-'*20
print s
print '%s: %s' % (e.__class__, e)
@@ -777,7 +777,7 @@
for s in INVALID:
try:
raises(SyntaxError, compile, s, '?', 'exec')
- except Exception ,e:
+ except Exception as e:
print '-'*20, 'UNEXPECTEDLY COMPILED:', '-'*20
print s
print '%s: %s' % (e.__class__, e)
diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py
--- a/pypy/interpreter/test/test_zzpickle_and_slow.py
+++ b/pypy/interpreter/test/test_zzpickle_and_slow.py
@@ -520,7 +520,7 @@
def f(): yield 42
f().__reduce__()
""")
- except TypeError, e:
+ except TypeError as e:
if 'pickle generator' not in str(e):
raise
py.test.skip("Frames can't be __reduce__()-ed")
diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py
--- a/pypy/module/__builtin__/__init__.py
+++ b/pypy/module/__builtin__/__init__.py
@@ -102,7 +102,7 @@
space = self.space
try:
w_builtin = space.getitem(w_globals, space.wrap('__builtins__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
else:
diff --git a/pypy/module/__builtin__/abstractinst.py b/pypy/module/__builtin__/abstractinst.py
--- a/pypy/module/__builtin__/abstractinst.py
+++ b/pypy/module/__builtin__/abstractinst.py
@@ -21,7 +21,7 @@
"""
try:
w_bases = space.getattr(w_cls, space.wrap('__bases__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise # propagate other errors
return None
@@ -41,7 +41,7 @@
def abstract_getclass(space, w_obj):
try:
return space.getattr(w_obj, space.wrap('__class__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise # propagate other errors
return space.type(w_obj)
@@ -63,7 +63,7 @@
w_result = space.isinstance_allow_override(w_obj, w_klass_or_tuple)
else:
w_result = space.isinstance(w_obj, w_klass_or_tuple)
- except OperationError, e: # if w_klass_or_tuple was not a type, ignore it
+ except OperationError as e: # if w_klass_or_tuple was not a type, ignore it
if not e.match(space, space.w_TypeError):
raise # propagate other errors
else:
@@ -81,7 +81,7 @@
w_klass_or_tuple)
else:
w_result = space.issubtype(w_pretendtype, w_klass_or_tuple)
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
return False # ignore most exceptions
@@ -102,7 +102,7 @@
" or tuple of classes and types")
try:
w_abstractclass = space.getattr(w_obj, space.wrap('__class__'))
- except OperationError, e:
+ except OperationError as e:
if e.async(space): # ignore most exceptions
raise
return False
@@ -142,7 +142,7 @@
w_klass_or_tuple)
else:
w_result = space.issubtype(w_derived, w_klass_or_tuple)
- except OperationError, e: # if one of the args was not a type, ignore it
+ except OperationError as e: # if one of the args was not a type, ignore it
if not e.match(space, space.w_TypeError):
raise # propagate other errors
else:
diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py
--- a/pypy/module/__builtin__/descriptor.py
+++ b/pypy/module/__builtin__/descriptor.py
@@ -62,7 +62,7 @@
else:
try:
w_type = space.getattr(w_obj_or_type, space.wrap('__class__'))
- except OperationError, o:
+ except OperationError as o:
if not o.match(space, space.w_AttributeError):
raise
w_type = w_objtype
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -80,7 +80,7 @@
start = space.int_w(w_start)
stop = space.int_w(w_stop)
step = space.int_w(w_step)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_OverflowError):
raise
return range_with_longs(space, w_start, w_stop, w_step)
@@ -177,7 +177,7 @@
jitdriver.jit_merge_point(has_key=has_key, has_item=has_item, w_type=w_type)
try:
w_item = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
@@ -356,7 +356,7 @@
w_index = space.wrap(self.remaining)
try:
w_item = space.getitem(self.w_sequence, w_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
else:
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -151,7 +151,7 @@
"cannot delete attribute '%s'", name)
try:
space.delitem(self.w_dict, w_attr)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
raise oefmt(space.w_AttributeError,
@@ -171,7 +171,7 @@
def get_module_string(self, space):
try:
w_mod = self.descr_getattribute(space, "__module__")
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
return "?"
@@ -240,7 +240,7 @@
def binaryop(self, space, w_other):
try:
w_meth = self.getattr(space, name, False)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
return space.w_NotImplemented
raise
@@ -288,7 +288,7 @@
def _coerce_helper(space, w_self, w_other):
try:
w_tup = space.coerce(w_self, w_other)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
return [w_self, w_other]
@@ -350,7 +350,7 @@
if w_meth is not None:
try:
return space.call_function(w_meth, space.wrap(name))
- except OperationError, e:
+ except OperationError as e:
if not exc and e.match(space, space.w_AttributeError):
return None # eat the AttributeError
raise
@@ -542,7 +542,7 @@
return w_res
try:
res = space.int_w(w_res)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise OperationError(
space.w_TypeError,
@@ -561,7 +561,7 @@
return w_res
try:
res = space.int_w(w_res)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise OperationError(
space.w_TypeError,
@@ -630,7 +630,7 @@
while 1:
try:
w_x = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
return space.w_False
raise
diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py
--- a/pypy/module/__builtin__/operation.py
+++ b/pypy/module/__builtin__/operation.py
@@ -64,7 +64,7 @@
w_name = checkattrname(space, w_name)
try:
return space.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
if w_defvalue is not None:
if e.match(space, space.w_AttributeError):
return w_defvalue
@@ -192,7 +192,7 @@
is exhausted, it is returned instead of raising StopIteration."""
try:
return space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if w_default is not None and e.match(space, space.w_StopIteration):
return w_default
raise
diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py
--- a/pypy/module/__builtin__/test/test_descriptor.py
+++ b/pypy/module/__builtin__/test/test_descriptor.py
@@ -93,7 +93,7 @@
def test_super_fail(self):
try:
super(list, 2)
- except TypeError, e:
+ except TypeError as e:
message = e.args[0]
assert message.startswith('super(type, obj): obj must be an instance or subtype of type')
@@ -303,7 +303,7 @@
for attr in "__doc__", "fget", "fset", "fdel":
try:
setattr(raw, attr, 42)
- except TypeError, msg:
+ except TypeError as msg:
if str(msg).find('readonly') < 0:
raise Exception("when setting readonly attr %r on a "
"property, got unexpected TypeError "
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -106,7 +106,7 @@
def validate_fd(space, fd):
try:
rposix.validate_fd(fd)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
def get_console_cp(space):
diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py
--- a/pypy/module/__pypy__/test/test_signal.py
+++ b/pypy/module/__pypy__/test/test_signal.py
@@ -35,7 +35,7 @@
for i in range(10):
print('x')
time.sleep(0.25)
- except BaseException, e:
+ except BaseException as e:
interrupted.append(e)
finally:
print('subthread stops, interrupted=%r' % (interrupted,))
@@ -120,7 +120,7 @@
time.sleep(0.5)
with __pypy__.thread.signals_enabled:
thread.interrupt_main()
- except BaseException, e:
+ except BaseException as e:
interrupted.append(e)
finally:
lock.release()
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -113,7 +113,7 @@
must_leave = space.threadlocals.try_enter_thread(space)
self.py_invoke(ll_res, ll_args)
#
- except Exception, e:
+ except Exception as e:
# oups! last-level attempt to recover.
try:
os.write(STDERR, "SystemError: callback raised ")
@@ -143,7 +143,7 @@
w_res = space.call(self.w_callable, w_args)
extra_line = "Trying to convert the result back to C:\n"
self.convert_result(ll_res, w_res)
- except OperationError, e:
+ except OperationError as e:
self.handle_applevel_exception(e, ll_res, extra_line)
@jit.unroll_safe
@@ -188,7 +188,7 @@
w_res = space.call_function(self.w_onerror, w_t, w_v, w_tb)
if not space.is_none(w_res):
self.convert_result(ll_res, w_res)
- except OperationError, e2:
+ except OperationError as e2:
# double exception! print a double-traceback...
self.print_error(e, extra_line) # original traceback
e2.write_unraisable(space, '', with_traceback=True,
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -247,7 +247,7 @@
for i in range(length):
try:
w_item = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise oefmt(space.w_ValueError,
@@ -256,7 +256,7 @@
target = rffi.ptradd(target, ctitemsize)
try:
space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
else:
diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py
--- a/pypy/module/_cffi_backend/cdlopen.py
+++ b/pypy/module/_cffi_backend/cdlopen.py
@@ -21,7 +21,7 @@
filename = ""
try:
handle = dlopen(ll_libname, flags)
- except DLOpenError, e:
+ except DLOpenError as e:
raise wrap_dlopenerror(ffi.space, e, filename)
W_LibObject.__init__(self, ffi, filename)
self.libhandle = handle
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -50,7 +50,7 @@
builder = CifDescrBuilder(fargs, fresult, abi)
try:
builder.rawallocate(self)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_NotImplementedError):
raise
# else, eat the NotImplementedError. We will get the
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -177,12 +177,12 @@
space = self.space
try:
fieldname = space.str_w(w_field_or_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
try:
index = space.int_w(w_field_or_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
raise OperationError(space.w_TypeError,
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -381,6 +381,6 @@
space.wrap("file has no OS file descriptor"))
try:
w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf)
diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py
--- a/pypy/module/_cffi_backend/embedding.py
+++ b/pypy/module/_cffi_backend/embedding.py
@@ -79,7 +79,7 @@
patch_sys(space)
load_embedded_cffi_module(space, version, init_struct)
res = 0
- except OperationError, operr:
+ except OperationError as operr:
operr.write_unraisable(space, "initialization of '%s'" % name,
with_traceback=True)
space.appexec([], r"""():
@@ -91,7 +91,7 @@
res = -1
if must_leave:
space.threadlocals.leave_thread(space)
- except Exception, e:
+ except Exception as e:
# oups! last-level attempt to recover.
try:
os.write(STDERR, "From initialization of '")
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -109,7 +109,7 @@
# w.r.t. buffers and memoryviews??
try:
buf = space.readbuf_w(w_x)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
buf = space.buffer_w(w_x, space.BUF_SIMPLE)
@@ -118,7 +118,7 @@
def _fetch_as_write_buffer(space, w_x):
try:
buf = space.writebuf_w(w_x)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
buf = space.buffer_w(w_x, space.BUF_WRITABLE)
diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py
--- a/pypy/module/_cffi_backend/lib_obj.py
+++ b/pypy/module/_cffi_backend/lib_obj.py
@@ -39,7 +39,7 @@
mod = __import__(modname, None, None, ['ffi', 'lib'])
return mod.lib""")
lib1 = space.interp_w(W_LibObject, w_lib1)
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
raise oefmt(space.w_ImportError,
diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py
--- a/pypy/module/_cffi_backend/libraryobj.py
+++ b/pypy/module/_cffi_backend/libraryobj.py
@@ -24,7 +24,7 @@
filename = ""
try:
self.handle = dlopen(ll_libname, flags)
- except DLOpenError, e:
+ except DLOpenError as e:
raise wrap_dlopenerror(space, e, filename)
self.name = filename
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -132,7 +132,7 @@
return space.int_w(w_ob)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if _is_a_float(space, w_ob):
@@ -149,7 +149,7 @@
return space.int_w(w_ob)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if _is_a_float(space, w_ob):
@@ -172,7 +172,7 @@
return r_ulonglong(value)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if strict and _is_a_float(space, w_ob):
@@ -197,7 +197,7 @@
return r_uint(value)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if strict and _is_a_float(space, w_ob):
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -175,7 +175,7 @@
w_start = space.getattr(w_exc, space.wrap('start'))
w_end = space.getattr(w_exc, space.wrap('end'))
w_obj = space.getattr(w_exc, space.wrap('object'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
raise OperationError(space.w_TypeError, space.wrap(
@@ -533,7 +533,7 @@
else:
try:
w_ch = space.getitem(self.w_mapping, space.newint(ord(ch)))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_LookupError):
raise
return errorchar
@@ -566,7 +566,7 @@
# get the character from the mapping
try:
w_ch = space.getitem(self.w_mapping, space.newint(ord(ch)))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_LookupError):
raise
return errorchar
@@ -645,7 +645,7 @@
space = self.space
try:
w_code = space.call_function(self.w_getcode, space.wrap(name))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
return -1
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
--- a/pypy/module/_codecs/test/test_codecs.py
+++ b/pypy/module/_codecs/test/test_codecs.py
@@ -458,7 +458,7 @@
if sys.maxunicode > 0xffff:
try:
"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
- except UnicodeDecodeError, ex:
+ except UnicodeDecodeError as ex:
assert "unicode_internal" == ex.encoding
assert "\x00\x00\x00\x00\x00\x11\x11\x00" == ex.object
assert ex.start == 4
@@ -650,7 +650,7 @@
def test_utf7_start_end_in_exception(self):
try:
'+IC'.decode('utf-7')
- except UnicodeDecodeError, exc:
+ except UnicodeDecodeError as exc:
assert exc.start == 0
assert exc.end == 3
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -169,7 +169,7 @@
while True:
try:
w_obj = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
@@ -191,7 +191,7 @@
while True:
try:
w_obj = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py
--- a/pypy/module/_collections/test/test_defaultdict.py
+++ b/pypy/module/_collections/test/test_defaultdict.py
@@ -26,7 +26,7 @@
for key in ['foo', (1,)]:
try:
d1[key]
- except KeyError, err:
+ except KeyError as err:
assert err.args[0] == key
else:
assert 0, "expected KeyError"
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py
--- a/pypy/module/_continuation/interp_continuation.py
+++ b/pypy/module/_continuation/interp_continuation.py
@@ -224,7 +224,7 @@
try:
frame = self.bottomframe
w_result = frame.execute_frame()
- except Exception, e:
+ except Exception as e:
global_state.propagate_exception = e
else:
global_state.w_value = w_result
diff --git a/pypy/module/_continuation/interp_pickle.py b/pypy/module/_continuation/interp_pickle.py
--- a/pypy/module/_continuation/interp_pickle.py
+++ b/pypy/module/_continuation/interp_pickle.py
@@ -69,7 +69,7 @@
try:
w_result = post_switch(sthread, h)
operr = None
- except OperationError, e:
+ except OperationError as e:
w_result = None
operr = e
#
@@ -88,7 +88,7 @@
try:
w_result = frame.execute_frame(w_result, operr)
operr = None
- except OperationError, e:
+ except OperationError as e:
w_result = None
operr = e
if exit_continulet is not None:
@@ -97,7 +97,7 @@
sthread.ec.topframeref = jit.vref_None
if operr:
raise operr
- except Exception, e:
+ except Exception as e:
global_state.propagate_exception = e
else:
global_state.w_value = w_result
diff --git a/pypy/module/_continuation/test/support.py b/pypy/module/_continuation/test/support.py
--- a/pypy/module/_continuation/test/support.py
+++ b/pypy/module/_continuation/test/support.py
@@ -8,6 +8,6 @@
def setup_class(cls):
try:
import rpython.rlib.rstacklet
- except CompilationError, e:
+ except CompilationError as e:
py.test.skip("cannot import rstacklet: %s" % e)
diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py
--- a/pypy/module/_continuation/test/test_stacklet.py
+++ b/pypy/module/_continuation/test/test_stacklet.py
@@ -553,11 +553,11 @@
res = "got keyerror"
try:
c1.switch(res)
- except IndexError, e:
+ except IndexError as e:
pass
try:
c1.switch(e)
- except IndexError, e2:
+ except IndexError as e2:
pass
try:
c1.switch(e2)
diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py
--- a/pypy/module/_csv/interp_reader.py
+++ b/pypy/module/_csv/interp_reader.py
@@ -66,7 +66,7 @@
while True:
try:
w_line = space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
if (field_builder is not None and
state != START_RECORD and state != EAT_CRNL and
diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py
--- a/pypy/module/_csv/interp_writer.py
+++ b/pypy/module/_csv/interp_writer.py
@@ -49,7 +49,7 @@
try:
space.float_w(w_field) # is it an int/long/float?
quoted = False
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
quoted = True
@@ -124,7 +124,7 @@
while True:
try:
w_seq = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -56,7 +56,7 @@
assert isinstance(self, W_File)
try:
self.direct_close()
- except StreamErrors, e:
+ except StreamErrors as e:
operr = wrap_streamerror(self.space, e, self.w_name)
raise operr
@@ -203,7 +203,7 @@
while n > 0:
try:
data = stream.read(n)
- except OSError, e:
+ except OSError as e:
# a special-case only for read() (similar to CPython, which
# also loses partial data with other methods): if we get
# EAGAIN after already some data was received, return it.
diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py
--- a/pypy/module/_file/interp_stream.py
+++ b/pypy/module/_file/interp_stream.py
@@ -83,7 +83,7 @@
"""
try:
return self.stream.read(n)
- except StreamErrors, e:
+ except StreamErrors as e:
raise wrap_streamerror(self.space, e)
def do_write(self, data):
@@ -94,7 +94,7 @@
"""
try:
self.stream.write(data)
- except StreamErrors, e:
+ except StreamErrors as e:
raise wrap_streamerror(self.space, e)
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py
--- a/pypy/module/_file/test/test_file.py
+++ b/pypy/module/_file/test/test_file.py
@@ -151,7 +151,7 @@
def test_oserror_has_filename(self):
try:
f = self.file("file that is clearly not there")
- except IOError, e:
+ except IOError as e:
assert e.filename == 'file that is clearly not there'
else:
raise Exception("did not raise")
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -28,7 +28,7 @@
space = global_name_fetcher.space
w_name = space.wrap(rffi.charp2str(obj_name[0].c_name))
global_name_fetcher.meth_names.append(w_name)
- except OperationError, e:
+ except OperationError as e:
global_name_fetcher.w_error = e
class NameFetcher:
diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py
--- a/pypy/module/_hashlib/test/test_hashlib.py
+++ b/pypy/module/_hashlib/test/test_hashlib.py
@@ -99,7 +99,7 @@
for hash_name, expected in sorted(expected_results.items()):
try:
m = _hashlib.new(hash_name)
- except ValueError, e:
+ except ValueError as e:
print 'skipped %s: %s' % (hash_name, e)
continue
m.update(test_string)
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -223,7 +223,7 @@
typename = space.type(self).name
try:
w_name = space.getattr(self, space.wrap("name"))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_Exception):
raise
return space.wrap("<%s>" % (typename,))
@@ -350,7 +350,7 @@
while True:
try:
w_written = space.call_method(self.w_raw, "write", w_data)
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue # try again
raise
@@ -526,7 +526,7 @@
while True:
try:
w_size = space.call_method(self.w_raw, "readinto", w_buf)
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue # try again
raise
@@ -733,7 +733,7 @@
# First write the current buffer
try:
self._writer_flush_unlocked(space)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.gettypeobject(
W_BlockingIOError.typedef)):
raise
diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py
--- a/pypy/module/_io/interp_fileio.py
+++ b/pypy/module/_io/interp_fileio.py
@@ -139,7 +139,7 @@
fd = -1
try:
fd = space.c_int_w(w_name)
- except OperationError, e:
+ except OperationError as e:
pass
else:
if fd < 0:
@@ -153,7 +153,7 @@
if fd >= 0:
try:
os.fstat(fd)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EBADF:
raise wrap_oserror(space, e)
# else: pass
@@ -170,7 +170,7 @@
try:
self.fd = dispatch_filename(rposix.open)(
space, w_name, flags, 0666)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror2(space, e, w_name,
exception_name='w_IOError')
finally:
@@ -184,7 +184,7 @@
# (otherwise, it might be done only on the first write()).
try:
os.lseek(self.fd, 0, os.SEEK_END)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e, exception_name='w_IOError')
except:
if not fd_is_own:
@@ -237,7 +237,7 @@
try:
os.close(fd)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e,
exception_name='w_IOError')
@@ -274,7 +274,7 @@
self._check_closed(space)
try:
pos = os.lseek(self.fd, pos, whence)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e,
exception_name='w_IOError')
return space.wrap(pos)
@@ -283,7 +283,7 @@
self._check_closed(space)
try:
pos = os.lseek(self.fd, 0, 1)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e,
exception_name='w_IOError')
return space.wrap(pos)
@@ -317,7 +317,7 @@
self._check_closed(space)
try:
res = os.isatty(self.fd)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e, exception_name='w_IOError')
return space.wrap(res)
@@ -344,7 +344,7 @@
try:
n = os.write(self.fd, data)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EAGAIN:
return space.w_None
raise wrap_oserror(space, e,
@@ -362,7 +362,7 @@
try:
s = os.read(self.fd, size)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EAGAIN:
return space.w_None
raise wrap_oserror(space, e,
@@ -377,7 +377,7 @@
length = rwbuffer.getlength()
try:
buf = os.read(self.fd, length)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EAGAIN:
return space.w_None
raise wrap_oserror(space, e,
@@ -396,7 +396,7 @@
try:
chunk = os.read(self.fd, newsize - total)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EINTR:
space.getexecutioncontext().checksignals()
continue
@@ -430,7 +430,7 @@
try:
self._truncate(space.r_longlong_w(w_size))
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e, exception_name='w_IOError')
return w_size
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -192,7 +192,7 @@
if has_peek:
try:
w_readahead = space.call_method(self, "peek", space.wrap(1))
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue
raise
@@ -222,7 +222,7 @@
try:
w_read = space.call_method(self, "read", space.wrap(nreadahead))
From pypy.commits at gmail.com Mon May 2 15:58:44 2016
From: pypy.commits at gmail.com (mattip)
Date: Mon, 02 May 2016 12:58:44 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-ext: update TODO
Message-ID: <5727b174.21f9c20a.7fa46.7c1f@mx.google.com>
Author: Matti Picus
Branch: cpyext-ext
Changeset: r84144:2dc18fa293ce
Date: 2016-05-02 13:50 +0300
http://bitbucket.org/pypy/pypy/changeset/2dc18fa293ce/
Log: update TODO
diff --git a/TODO b/TODO
--- a/TODO
+++ b/TODO
@@ -1,10 +1,5 @@
-* python setup.py install in numpy does not somehow tell setuptools
- it's installed (I bet it's about the py27 tag)
-* reduce size of generated c code from slot definitions in slotdefs.
-* fix py_string_as_string_unicode-getstringandsize_unicode which
- segfaults when run -A after printing '.', the same test passes cpython -A
- and untranslated
-* export ndarrayobject objects like PyArrayObject, PyArrayDescrObject needed
+* Add ByteArrayObject
+* Export ndarrayobject objects like PyArrayObject, PyArrayDescrObject needed
 to continue using micronumpy as a numpy 1.10 ndarray alternative
This used to be done with pypy-specific headers which replaced upstream's
headers, can be tested by installing matplotlib or aubio (pypy/numpy issue #47)
From pypy.commits at gmail.com Mon May 2 15:58:46 2016
From: pypy.commits at gmail.com (mattip)
Date: Mon, 02 May 2016 12:58:46 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-ext: merge default into branch
Message-ID: <5727b176.8344c20a.c3eed.7cee@mx.google.com>
Author: Matti Picus
Branch: cpyext-ext
Changeset: r84145:a08c66c9b40e
Date: 2016-05-02 13:53 +0300
http://bitbucket.org/pypy/pypy/changeset/a08c66c9b40e/
Log: merge default into branch
diff too long, truncating to 2000 out of 2113 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -21,3 +21,4 @@
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
The work on the cling backend has so far been done only for CPython, but
bringing it to PyPy is a lot less work than developing it in the first place.
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
.. _llvm: http://llvm.org/
.. _clang: http://clang.llvm.org/
@@ -283,7 +283,8 @@
core reflection set, but for the moment assume we want to have it in the
reflection library that we are building for this example.
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries")
which is a simple XML file specifying, either explicitly or by using a
pattern, which classes, variables, namespaces, etc. to select from the given
header file.
@@ -305,7 +306,7 @@
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
Now the reflection info can be generated and compiled::
@@ -811,7 +812,7 @@
immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
variable.
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
There are a couple of minor differences between PyCintex and cppyy, most to do
with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts `
+:source:`pypy/goal/` our main PyPy-translation scripts
live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
discussion/improve-rpython
discussion/ctypes-implementation
discussion/jit-profiler
+ discussion/rawrefcount
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -79,7 +79,7 @@
:doc:`Full details ` are `available here `.
.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
RPython Mixed Modules
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,20 +106,33 @@
For information on which third party extensions work (or do not work)
with PyPy see the `compatibility wiki`_.
+For more information about how we manage refcounting semantics see
+rawrefcount_
+
.. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
.. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html
On which platforms does PyPy run?
---------------------------------
-PyPy is regularly and extensively tested on Linux machines. It mostly
+PyPy currently supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+PyPy is regularly and extensively tested on Linux machines. It
works on Mac and Windows: it is tested there, but most of us are running
-Linux so fixes may depend on 3rd-party contributions. PyPy's JIT
-works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7).
-Support for POWER (64-bit) is stalled at the moment.
+Linux so fixes may depend on 3rd-party contributions.
-To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or
+To bootstrap from sources, PyPy can use either CPython 2.7 or
another (e.g. older) PyPy. Cross-translation is not really supported:
e.g. to build a 32-bit PyPy, you need to have a 32-bit environment.
Cross-translation is only explicitly supported between a 32-bit Intel
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released a bugfix for PyPy 5.1, due to a regression_ in
+installing third-party packages dependent on numpy (using our numpy fork
+available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy which increased the memory required to translate. Improvement
+will be noticed by downstream packagers and those who translate rather than
+download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -24,7 +24,11 @@
remove-objspace-options.
.. branch: cpyext-for-merge
-Update cpyext C-API support:
+
+Update cpyext C-API support After this branch, we are almost able to support
+upstream numpy via cpyext, so we created (yet another) fork of numpy at
+github.com/pypy/numpy with the needed changes. Among the significant changes
+to cpyext:
- allow c-snippet tests to be run with -A so we can verify we are compatible
- fix many edge cases exposed by fixing tests to run with -A
- issequence() logic matches cpython
@@ -40,6 +44,20 @@
- rewrite slot assignment for typeobjects
- improve tracking of PyObject to rpython object mapping
- support tp_as_{number, sequence, mapping, buffer} slots
-After this branch, we are almost able to support upstream numpy via cpyext, so
-we created (yet another) fork of numpy at github.com/pypy/numpy with the needed
-changes
+
+(makes the pypy-c bigger; this was fixed subsequently by the
+share-cpyext-cpython-api branch)
+
+.. branch: share-mapdict-methods-2
+
+Reduce generated code for subclasses by using the same function objects in all
+generated subclasses.
+
+.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL. Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -78,7 +78,11 @@
"""
try:
# run it
- f(*fargs, **fkwds)
+ try:
+ f(*fargs, **fkwds)
+ finally:
+ sys.settrace(None)
+ sys.setprofile(None)
# we arrive here if no exception is raised. stdout cosmetics...
try:
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -362,6 +362,26 @@
""")
assert seen == [1]
+ def test_mapdict_number_of_slots(self):
+ space = self.space
+ a, b, c = space.unpackiterable(space.appexec([], """():
+ class A(object):
+ pass
+ a = A()
+ a.x = 1
+ class B:
+ pass
+ b = B()
+ b.x = 1
+ class C(int):
+ pass
+ c = C(1)
+ c.x = 1
+ return a, b, c
+ """), 3)
+ assert not hasattr(a, "storage")
+ assert not hasattr(b, "storage")
+ assert hasattr(c, "storage")
class AppTestTypeDef:
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -103,44 +103,63 @@
# we need two subclasses of the app-level type, one to add mapdict, and then one
# to add del to not slow down the GC.
-def get_unique_interplevel_subclass(config, cls, needsdel=False):
+def get_unique_interplevel_subclass(space, cls, needsdel=False):
"NOT_RPYTHON: initialization-time only"
if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False):
needsdel = False
assert cls.typedef.acceptable_as_base_class
- key = config, cls, needsdel
+ key = space, cls, needsdel
try:
return _subclass_cache[key]
except KeyError:
# XXX can save a class if cls already has a __del__
if needsdel:
- cls = get_unique_interplevel_subclass(config, cls, False)
- subcls = _getusercls(config, cls, needsdel)
+ cls = get_unique_interplevel_subclass(space, cls, False)
+ subcls = _getusercls(space, cls, needsdel)
assert key not in _subclass_cache
_subclass_cache[key] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
_subclass_cache = {}
-def _getusercls(config, cls, wants_del, reallywantdict=False):
+def _getusercls(space, cls, wants_del, reallywantdict=False):
from rpython.rlib import objectmodel
+ from pypy.objspace.std.objectobject import W_ObjectObject
+ from pypy.module.__builtin__.interp_classobj import W_InstanceObject
from pypy.objspace.std.mapdict import (BaseUserClassMapdict,
MapdictDictSupport, MapdictWeakrefSupport,
- _make_storage_mixin_size_n)
+ _make_storage_mixin_size_n, MapdictStorageMixin)
typedef = cls.typedef
name = cls.__name__ + "User"
- mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()]
+ mixins_needed = []
+ if cls is W_ObjectObject or cls is W_InstanceObject:
+ mixins_needed.append(_make_storage_mixin_size_n())
+ else:
+ mixins_needed.append(MapdictStorageMixin)
+ copy_methods = [BaseUserClassMapdict]
if reallywantdict or not typedef.hasdict:
# the type has no dict, mapdict to provide the dict
- mixins_needed.append(MapdictDictSupport)
+ copy_methods.append(MapdictDictSupport)
name += "Dict"
if not typedef.weakrefable:
# the type does not support weakrefs yet, mapdict to provide weakref
# support
- mixins_needed.append(MapdictWeakrefSupport)
+ copy_methods.append(MapdictWeakrefSupport)
name += "Weakrefable"
if wants_del:
+ # This subclass comes with an app-level __del__. To handle
+ # it, we make an RPython-level __del__ method. This
+ # RPython-level method is called directly by the GC and it
+ # cannot do random things (calling the app-level __del__ would
+ # be "random things"). So instead, we just call here
+ # enqueue_for_destruction(), and the app-level __del__ will be
+ # called later at a safe point (typically between bytecodes).
+ # If there is also an inherited RPython-level __del__, it is
+ # called afterwards---not immediately! This base
+ # RPython-level __del__ is supposed to run only when the
+ # object is not reachable any more. NOTE: it doesn't fully
+ # work: see issue #2287.
name += "Del"
parent_destructor = getattr(cls, '__del__', None)
def call_parent_del(self):
@@ -148,14 +167,14 @@
parent_destructor(self)
def call_applevel_del(self):
assert isinstance(self, subcls)
- self.space.userdel(self)
+ space.userdel(self)
class Proto(object):
def __del__(self):
self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space, call_applevel_del,
+ self.enqueue_for_destruction(space, call_applevel_del,
'method __del__ of ')
if parent_destructor is not None:
- self.enqueue_for_destruction(self.space, call_parent_del,
+ self.enqueue_for_destruction(space, call_parent_del,
'internal destructor of ')
mixins_needed.append(Proto)
@@ -163,10 +182,17 @@
user_overridden_class = True
for base in mixins_needed:
objectmodel.import_from_mixin(base)
+ for copycls in copy_methods:
+ _copy_methods(copycls, subcls)
del subcls.base
subcls.__name__ = name
return subcls
+def _copy_methods(copycls, subcls):
+ for key, value in copycls.__dict__.items():
+ if (not key.startswith('__') or key == '__del__'):
+ setattr(subcls, key, value)
+
# ____________________________________________________________
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -195,9 +195,9 @@
return
self.cls_without_del = _getusercls(
- space.config, W_InstanceObject, False, reallywantdict=True)
+ space, W_InstanceObject, False, reallywantdict=True)
self.cls_with_del = _getusercls(
- space.config, W_InstanceObject, True, reallywantdict=True)
+ space, W_InstanceObject, True, reallywantdict=True)
def class_descr_call(space, w_self, __args__):
diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py
--- a/pypy/module/_io/test/test_bufferedio.py
+++ b/pypy/module/_io/test/test_bufferedio.py
@@ -307,7 +307,6 @@
class MyIO(_io.BufferedWriter):
def __del__(self):
record.append(1)
- super(MyIO, self).__del__()
def close(self):
record.append(2)
super(MyIO, self).close()
diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py
--- a/pypy/module/_io/test/test_io.py
+++ b/pypy/module/_io/test/test_io.py
@@ -88,7 +88,6 @@
class MyIO(io.IOBase):
def __del__(self):
record.append(1)
- super(MyIO, self).__del__()
def close(self):
record.append(2)
super(MyIO, self).close()
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -436,7 +436,7 @@
s = capi.c_resolve_name(self.space, s)
if s != self.templ_args[i]:
raise OperationError(self.space.w_TypeError, self.space.wrap(
- "non-matching template (got %s where %s expected" % (s, self.templ_args[i])))
+ "non-matching template (got %s where %s expected)" % (s, self.templ_args[i])))
return W_CPPBoundMethod(cppthis, self)
def bound_call(self, cppthis, args_w):
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -10,6 +10,7 @@
from rpython.rtyper.lltypesystem import ll2ctypes
from rpython.rtyper.annlowlevel import llhelper
from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here
+from rpython.rlib.objectmodel import dont_inline
from rpython.translator import cdir
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.gensupp import NameManager
@@ -87,13 +88,13 @@
FILEP = rffi.COpaquePtr('FILE')
if sys.platform == 'win32':
- fileno = rffi.llexternal('_fileno', [FILEP], rffi.INT)
+ dash = '_'
else:
- fileno = rffi.llexternal('fileno', [FILEP], rffi.INT)
-
+ dash = ''
+fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT)
fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP)
-fdopen = rffi.llexternal('fdopen', [rffi.INT, CONST_STRING], FILEP,
- save_err=rffi.RFFI_SAVE_ERRNO)
+fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING],
+ FILEP, save_err=rffi.RFFI_SAVE_ERRNO)
_fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
def fclose(fp):
@@ -255,7 +256,7 @@
class ApiFunction:
def __init__(self, argtypes, restype, callable, error=_NOT_SPECIFIED,
- c_name=None, gil=None, result_borrowed=False):
+ c_name=None, gil=None, result_borrowed=False, result_is_ll=False):
self.argtypes = argtypes
self.restype = restype
self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype))
@@ -276,6 +277,9 @@
assert len(self.argnames) == len(self.argtypes)
self.gil = gil
self.result_borrowed = result_borrowed
+ self.result_is_ll = result_is_ll
+ if result_is_ll: # means 'returns a low-level PyObject pointer'
+ assert is_PyObject(restype)
#
def get_llhelper(space):
return llhelper(self.functype, self.get_wrapper(space))
@@ -297,7 +301,7 @@
DEFAULT_HEADER = 'pypy_decl.h'
def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER,
- gil=None, result_borrowed=False):
+ gil=None, result_borrowed=False, result_is_ll=False):
"""
Declares a function to be exported.
- `argtypes`, `restype` are lltypes and describe the function signature.
@@ -336,7 +340,8 @@
c_name = func_name
api_function = ApiFunction(argtypes, restype, func, error,
c_name=c_name, gil=gil,
- result_borrowed=result_borrowed)
+ result_borrowed=result_borrowed,
+ result_is_ll=result_is_ll)
func.api_func = api_function
if error is _NOT_SPECIFIED:
@@ -612,6 +617,9 @@
def is_PyObject(TYPE):
if not isinstance(TYPE, lltype.Ptr):
return False
+ if TYPE == PyObject:
+ return True
+ assert not isinstance(TYPE.TO, lltype.ForwardReference)
return hasattr(TYPE.TO, 'c_ob_refcnt') and hasattr(TYPE.TO, 'c_ob_type')
# a pointer to PyObject
@@ -668,37 +676,161 @@
pypy_debug_catch_fatal_exception = rffi.llexternal('pypy_debug_catch_fatal_exception', [], lltype.Void)
+
+# ____________________________________________________________
+
+
+class WrapperCache(object):
+ def __init__(self, space):
+ self.space = space
+ self.wrapper_gens = {} # {signature: WrapperGen()}
+ self.stats = [0, 0]
+
+class WrapperGen(object):
+ wrapper_second_level = None
+
+ def __init__(self, space, signature):
+ self.space = space
+ self.signature = signature
+ self.callable2name = []
+
+ def make_wrapper(self, callable):
+ self.callable2name.append((callable, callable.__name__))
+ if self.wrapper_second_level is None:
+ self.wrapper_second_level = make_wrapper_second_level(
+ self.space, self.callable2name, *self.signature)
+ wrapper_second_level = self.wrapper_second_level
+
+ def wrapper(*args):
+ # no GC here, not even any GC object
+ args += (callable,)
+ return wrapper_second_level(*args)
+
+ wrapper.__name__ = "wrapper for %r" % (callable, )
+ return wrapper
+
+
# Make the wrapper for the cases (1) and (2)
def make_wrapper(space, callable, gil=None):
"NOT_RPYTHON"
+ # This logic is obscure, because we try to avoid creating one
+ # big wrapper() function for every callable. Instead we create
+ # only one per "signature".
+
+ argnames = callable.api_func.argnames
+ argtypesw = zip(callable.api_func.argtypes,
+ [_name.startswith("w_") for _name in argnames])
+ error_value = getattr(callable.api_func, "error_value", CANNOT_FAIL)
+ if (isinstance(callable.api_func.restype, lltype.Ptr)
+ and error_value is not CANNOT_FAIL):
+ assert lltype.typeOf(error_value) == callable.api_func.restype
+ assert not error_value # only support error=NULL
+ error_value = 0 # because NULL is not hashable
+
+ if callable.api_func.result_is_ll:
+ result_kind = "L"
+ elif callable.api_func.result_borrowed:
+ result_kind = "B" # note: 'result_borrowed' is ignored if we also
+ else: # say 'result_is_ll=True' (in this case it's
+ result_kind = "." # up to you to handle refcounting anyway)
+
+ signature = (tuple(argtypesw),
+ callable.api_func.restype,
+ result_kind,
+ error_value,
+ gil)
+
+ cache = space.fromcache(WrapperCache)
+ cache.stats[1] += 1
+ try:
+ wrapper_gen = cache.wrapper_gens[signature]
+ except KeyError:
+ print signature
+ wrapper_gen = cache.wrapper_gens[signature] = WrapperGen(space,
+ signature)
+ cache.stats[0] += 1
+ #print 'Wrapper cache [wrappers/total]:', cache.stats
+ return wrapper_gen.make_wrapper(callable)
+
+
+ at dont_inline
+def deadlock_error(funcname):
+ fatalerror_notb("GIL deadlock detected when a CPython C extension "
+ "module calls '%s'" % (funcname,))
+
+ at dont_inline
+def no_gil_error(funcname):
+ fatalerror_notb("GIL not held when a CPython C extension "
+ "module calls '%s'" % (funcname,))
+
+ at dont_inline
+def not_supposed_to_fail(funcname):
+ raise SystemError("The function '%s' was not supposed to fail"
+ % (funcname,))
+
+ at dont_inline
+def unexpected_exception(funcname, e, tb):
+ print 'Fatal error in cpyext, CPython compatibility layer, calling',funcname
+ print 'Either report a bug or consider not using this particular extension'
+ if not we_are_translated():
+ if tb is None:
+ tb = sys.exc_info()[2]
+ import traceback
+ traceback.print_exc()
+ if sys.stdout == sys.__stdout__:
+ import pdb; pdb.post_mortem(tb)
+ # we can't do much here, since we're in ctypes, swallow
+ else:
+ print str(e)
+ pypy_debug_catch_fatal_exception()
+ assert False
+
+def make_wrapper_second_level(space, callable2name, argtypesw, restype,
+ result_kind, error_value, gil):
from rpython.rlib import rgil
- names = callable.api_func.argnames
- argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes,
- [name.startswith("w_") for name in names])))
- fatal_value = callable.api_func.restype._defl()
+ argtypes_enum_ui = unrolling_iterable(enumerate(argtypesw))
+ fatal_value = restype._defl()
+ gil_auto_workaround = (gil is None) # automatically detect when we don't
+ # have the GIL, and acquire/release it
gil_acquire = (gil == "acquire" or gil == "around")
gil_release = (gil == "release" or gil == "around")
pygilstate_ensure = (gil == "pygilstate_ensure")
pygilstate_release = (gil == "pygilstate_release")
assert (gil is None or gil_acquire or gil_release
or pygilstate_ensure or pygilstate_release)
- deadlock_error = ("GIL deadlock detected when a CPython C extension "
- "module calls %r" % (callable.__name__,))
- no_gil_error = ("GIL not held when a CPython C extension "
- "module calls %r" % (callable.__name__,))
+ expected_nb_args = len(argtypesw) + pygilstate_ensure
- @specialize.ll()
- def wrapper(*args):
+ if isinstance(restype, lltype.Ptr) and error_value == 0:
+ error_value = lltype.nullptr(restype.TO)
+ if error_value is not CANNOT_FAIL:
+ assert lltype.typeOf(error_value) == lltype.typeOf(fatal_value)
+
+ def invalid(err):
+ "NOT_RPYTHON: translation-time crash if this ends up being called"
+ raise ValueError(err)
+ invalid.__name__ = 'invalid_%s' % (callable2name[0][1],)
+
+ def nameof(callable):
+ for c, n in callable2name:
+ if c is callable:
+ return n
+ return ''
+ nameof._dont_inline_ = True
+
+ def wrapper_second_level(*args):
from pypy.module.cpyext.pyobject import make_ref, from_ref, is_pyobj
from pypy.module.cpyext.pyobject import as_pyobj
# we hope that malloc removal removes the newtuple() that is
# inserted exactly here by the varargs specializer
+ callable = args[-1]
+ args = args[:-1]
# see "Handling of the GIL" above (careful, we don't have the GIL here)
tid = rthread.get_or_make_ident()
- if gil_acquire:
+ _gil_auto = (gil_auto_workaround and cpyext_glob_tid_ptr[0] != tid)
+ if gil_acquire or _gil_auto:
if cpyext_glob_tid_ptr[0] == tid:
- fatalerror_notb(deadlock_error)
+ deadlock_error(nameof(callable))
rgil.acquire()
assert cpyext_glob_tid_ptr[0] == 0
elif pygilstate_ensure:
@@ -711,7 +843,7 @@
args += (pystate.PyGILState_UNLOCKED,)
else:
if cpyext_glob_tid_ptr[0] != tid:
- fatalerror_notb(no_gil_error)
+ no_gil_error(nameof(callable))
cpyext_glob_tid_ptr[0] = 0
rffi.stackcounter.stacks_counter += 1
@@ -722,8 +854,7 @@
try:
if not we_are_translated() and DEBUG_WRAPPER:
print >>sys.stderr, callable,
- assert len(args) == (len(callable.api_func.argtypes) +
- pygilstate_ensure)
+ assert len(args) == expected_nb_args
for i, (typ, is_wrapped) in argtypes_enum_ui:
arg = args[i]
if is_PyObject(typ) and is_wrapped:
@@ -757,41 +888,31 @@
failed = False
if failed:
- error_value = callable.api_func.error_value
if error_value is CANNOT_FAIL:
- raise SystemError("The function '%s' was not supposed to fail"
- % (callable.__name__,))
+ raise not_supposed_to_fail(nameof(callable))
retval = error_value
- elif is_PyObject(callable.api_func.restype):
+ elif is_PyObject(restype):
if is_pyobj(result):
- retval = result
+ if result_kind != "L":
+ raise invalid("missing result_is_ll=True")
else:
- if result is not None:
- if callable.api_func.result_borrowed:
- retval = as_pyobj(space, result)
- else:
- retval = make_ref(space, result)
- retval = rffi.cast(callable.api_func.restype, retval)
+ if result_kind == "L":
+ raise invalid("result_is_ll=True but not ll PyObject")
+ if result_kind == "B": # borrowed
+ result = as_pyobj(space, result)
else:
- retval = lltype.nullptr(PyObject.TO)
- elif callable.api_func.restype is not lltype.Void:
- retval = rffi.cast(callable.api_func.restype, result)
+ result = make_ref(space, result)
+ retval = rffi.cast(restype, result)
+
+ elif restype is not lltype.Void:
+ retval = rffi.cast(restype, result)
+
except Exception, e:
- print 'Fatal error in cpyext, CPython compatibility layer, calling', callable.__name__
- print 'Either report a bug or consider not using this particular extension'
- if not we_are_translated():
- if tb is None:
- tb = sys.exc_info()[2]
- import traceback
- traceback.print_exc()
- if sys.stdout == sys.__stdout__:
- import pdb; pdb.post_mortem(tb)
- # we can't do much here, since we're in ctypes, swallow
- else:
- print str(e)
- pypy_debug_catch_fatal_exception()
- assert False
+ unexpected_exception(nameof(callable), e, tb)
+ return fatal_value
+
+ assert lltype.typeOf(retval) == restype
rffi.stackcounter.stacks_counter -= 1
# see "Handling of the GIL" above
@@ -801,16 +922,16 @@
arg = rffi.cast(lltype.Signed, args[-1])
unlock = (arg == pystate.PyGILState_UNLOCKED)
else:
- unlock = gil_release
+ unlock = gil_release or _gil_auto
if unlock:
rgil.release()
else:
cpyext_glob_tid_ptr[0] = tid
return retval
- callable._always_inline_ = 'try'
- wrapper.__name__ = "wrapper for %r" % (callable, )
- return wrapper
+
+ wrapper_second_level._dont_inline_ = True
+ return wrapper_second_level
def process_va_name(name):
return name.replace('*', '_star')
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -6,7 +6,7 @@
from pypy.module.cpyext.pyerrors import PyErr_BadArgument
from pypy.module.cpyext.pyobject import (
PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference,
- make_typedescr, get_typedescr, Py_IncRef)
+ make_typedescr, get_typedescr, as_pyobj, Py_IncRef, get_w_obj_and_decref)
##
## Implementation of PyStringObject
@@ -124,7 +124,7 @@
#_______________________________________________________________________
- at cpython_api([CONST_STRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_STRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyString_FromStringAndSize(space, char_p, length):
if char_p:
s = rffi.charpsize2str(char_p, length)
@@ -233,7 +233,7 @@
def _PyString_Eq(space, w_str1, w_str2):
return space.eq_w(w_str1, w_str2)
- at cpython_api([PyObjectP, PyObject], lltype.Void)
+ at cpython_api([PyObjectP, PyObject], lltype.Void, error=None)
def PyString_Concat(space, ref, w_newpart):
"""Create a new string object in *string containing the contents of newpart
appended to string; the caller will own the new reference. The reference to
@@ -241,26 +241,27 @@
the old reference to string will still be discarded and the value of
*string will be set to NULL; the appropriate exception will be set."""
- if not ref[0]:
+ old = ref[0]
+ if not old:
return
- if w_newpart is None or not PyString_Check(space, ref[0]) or not \
- (space.isinstance_w(w_newpart, space.w_str) or
- space.isinstance_w(w_newpart, space.w_unicode)):
- Py_DecRef(space, ref[0])
- ref[0] = lltype.nullptr(PyObject.TO)
- return
- w_str = from_ref(space, ref[0])
- w_newstr = space.add(w_str, w_newpart)
- ref[0] = make_ref(space, w_newstr)
- Py_IncRef(space, ref[0])
+ ref[0] = lltype.nullptr(PyObject.TO)
+ w_str = get_w_obj_and_decref(space, old)
+ if w_newpart is not None and PyString_Check(space, old):
+ # xxx if w_newpart is not a string or unicode or bytearray,
+ # this might call __radd__() on it, whereas CPython raises
+ # a TypeError in this case.
+ w_newstr = space.add(w_str, w_newpart)
+ ref[0] = make_ref(space, w_newstr)
- at cpython_api([PyObjectP, PyObject], lltype.Void)
+ at cpython_api([PyObjectP, PyObject], lltype.Void, error=None)
def PyString_ConcatAndDel(space, ref, newpart):
"""Create a new string object in *string containing the contents of newpart
appended to string. This version decrements the reference count of newpart."""
- PyString_Concat(space, ref, newpart)
- Py_DecRef(space, newpart)
+ try:
+ PyString_Concat(space, ref, newpart)
+ finally:
+ Py_DecRef(space, newpart)
@cpython_api([PyObject, PyObject], PyObject)
def PyString_Format(space, w_format, w_args):
diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py
--- a/pypy/module/cpyext/cdatetime.py
+++ b/pypy/module/cpyext/cdatetime.py
@@ -15,6 +15,7 @@
('DateTimeType', PyTypeObjectPtr),
('TimeType', PyTypeObjectPtr),
('DeltaType', PyTypeObjectPtr),
+ ('TZInfoType', PyTypeObjectPtr),
))
@cpython_api([], lltype.Ptr(PyDateTime_CAPI))
@@ -40,6 +41,10 @@
datetimeAPI.c_DeltaType = rffi.cast(
PyTypeObjectPtr, make_ref(space, w_type))
+ w_type = space.getattr(w_datetime, space.wrap("tzinfo"))
+ datetimeAPI.c_TZInfoType = rffi.cast(
+ PyTypeObjectPtr, make_ref(space, w_type))
+
return datetimeAPI
PyDateTime_DateStruct = lltype.ForwardReference()
@@ -87,6 +92,7 @@
make_check_function("PyDate_Check", "date")
make_check_function("PyTime_Check", "time")
make_check_function("PyDelta_Check", "timedelta")
+make_check_function("PyTZInfo_Check", "tzinfo")
# Constructors
diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py
--- a/pypy/module/cpyext/frameobject.py
+++ b/pypy/module/cpyext/frameobject.py
@@ -67,7 +67,8 @@
track_reference(space, py_obj, w_obj)
return w_obj
- at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject)
+ at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject,
+ result_is_ll=True)
def PyFrame_New(space, tstate, w_code, w_globals, w_locals):
typedescr = get_typedescr(PyFrame.typedef)
py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef))
diff --git a/pypy/module/cpyext/include/datetime.h b/pypy/module/cpyext/include/datetime.h
--- a/pypy/module/cpyext/include/datetime.h
+++ b/pypy/module/cpyext/include/datetime.h
@@ -11,6 +11,7 @@
PyTypeObject *DateTimeType;
PyTypeObject *TimeType;
PyTypeObject *DeltaType;
+ PyTypeObject *TZInfoType;
} PyDateTime_CAPI;
PyAPI_DATA(PyDateTime_CAPI*) PyDateTimeAPI;
@@ -36,6 +37,10 @@
PyObject_HEAD
} PyDateTime_DateTime;
+typedef struct {
+ PyObject_HEAD
+} PyDateTime_TZInfo;
+
#ifdef __cplusplus
}
#endif
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -239,9 +239,7 @@
gufunctype = lltype.Ptr(ufuncs.GenericUfunc)
-# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there
-# a problem with casting function pointers?
- at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
+ at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t,
rffi.CCHARP], PyObject, header=HEADER)
def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes,
@@ -256,7 +254,7 @@
funcs_w = [None] * ntypes
dtypes_w = [None] * ntypes * (nin + nout)
for i in range(ntypes):
- funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data)
+ funcs_w[i] = ufuncs.W_GenericUFuncCaller(funcs[i], data)
for i in range(ntypes*(nin+nout)):
dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])]
w_funcs = space.newlist(funcs_w)
@@ -268,7 +266,7 @@
w_signature, w_identity, w_name, w_doc, stack_inputs=True)
return ufunc_generic
- at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
+ at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], PyObject, header=HEADER)
def PyUFunc_FromFuncAndData(space, funcs, data, types, ntypes,
nin, nout, identity, name, doc, check_return):
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -34,11 +34,11 @@
def PyObject_Free(space, ptr):
lltype.free(ptr, flavor='raw')
- at cpython_api([PyTypeObjectPtr], PyObject)
+ at cpython_api([PyTypeObjectPtr], PyObject, result_is_ll=True)
def _PyObject_New(space, type):
return _PyObject_NewVar(space, type, 0)
- at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def _PyObject_NewVar(space, type, itemcount):
w_type = from_ref(space, rffi.cast(PyObject, type))
assert isinstance(w_type, W_TypeObject)
@@ -63,7 +63,7 @@
if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE:
Py_DecRef(space, rffi.cast(PyObject, pto))
- at cpython_api([PyTypeObjectPtr], PyObject)
+ at cpython_api([PyTypeObjectPtr], PyObject, result_is_ll=True)
def _PyObject_GC_New(space, type):
return _PyObject_New(space, type)
@@ -193,7 +193,7 @@
space.delitem(w_obj, w_key)
return 0
- at cpython_api([PyObject, PyTypeObjectPtr], PyObject)
+ at cpython_api([PyObject, PyTypeObjectPtr], PyObject, result_is_ll=True)
def PyObject_Init(space, obj, type):
"""Initialize a newly-allocated object op with its type and initial
reference. Returns the initialized object. If type indicates that the
@@ -207,7 +207,7 @@
obj.c_ob_refcnt = 1
return obj
- at cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def PyObject_InitVar(space, py_obj, type, size):
"""This does everything PyObject_Init() does, and also initializes the
length information for a variable-size object."""
@@ -308,7 +308,7 @@
w_res = PyObject_RichCompare(space, ref1, ref2, opid)
return int(space.is_true(w_res))
- at cpython_api([PyObject], PyObject)
+ at cpython_api([PyObject], PyObject, result_is_ll=True)
def PyObject_SelfIter(space, ref):
"""Undocumented function, this is what CPython does."""
Py_IncRef(space, ref)
diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py
--- a/pypy/module/cpyext/pystate.py
+++ b/pypy/module/cpyext/pystate.py
@@ -168,8 +168,16 @@
state = space.fromcache(InterpreterState)
return state.get_thread_state(space)
- at cpython_api([], PyObject, error=CANNOT_FAIL)
+ at cpython_api([], PyObject, result_is_ll=True, error=CANNOT_FAIL)
def PyThreadState_GetDict(space):
+ """Return a dictionary in which extensions can store thread-specific state
+ information. Each extension should use a unique key to use to store state in
+ the dictionary. It is okay to call this function when no current thread state
+ is available. If this function returns NULL, no exception has been raised and
+ the caller should assume no current thread state is available.
+
+ Previously this could only be called when a current thread is active, and NULL
+ meant that an exception was raised."""
state = space.fromcache(InterpreterState)
return state.get_thread_state(space).c_dict
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py
--- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py
@@ -1099,19 +1099,6 @@
PyInterpreterState_Clear()."""
raise NotImplementedError
- at cpython_api([], PyObject)
-def PyThreadState_GetDict(space):
- """Return a dictionary in which extensions can store thread-specific state
- information. Each extension should use a unique key to use to store state in
- the dictionary. It is okay to call this function when no current thread state
- is available. If this function returns NULL, no exception has been raised and
- the caller should assume no current thread state is available.
-
- Previously this could only be called when a current thread is active, and NULL
- meant that an exception was raised."""
- borrow_from()
- raise NotImplementedError
-
@cpython_api([lltype.Signed, PyObject], rffi.INT_real, error=CANNOT_FAIL)
def PyThreadState_SetAsyncExc(space, id, exc):
"""Asynchronously raise an exception in a thread. The id argument is the thread
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -3,7 +3,7 @@
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.bytesobject import new_empty_str, PyStringObject
-from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP
+from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call
from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref
from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr
@@ -145,6 +145,7 @@
"""
PyObject ** v;
PyObject * left = PyTuple_GetItem(args, 0);
+ Py_INCREF(left); /* the reference will be stolen! */
v = &left;
PyString_Concat(v, PyTuple_GetItem(args, 1));
return *v;
@@ -339,13 +340,16 @@
c_buf = py_str.c_ob_type.c_tp_as_buffer
assert c_buf
py_obj = rffi.cast(PyObject, py_str)
- assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1
+ assert generic_cpy_call(space, c_buf.c_bf_getsegcount,
+ py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1
ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw')
- assert c_buf.c_bf_getsegcount(py_obj, ref) == 1
+ assert generic_cpy_call(space, c_buf.c_bf_getsegcount,
+ py_obj, ref) == 1
assert ref[0] == 10
lltype.free(ref, flavor='raw')
ref = lltype.malloc(rffi.VOIDPP.TO, 1, flavor='raw')
- assert c_buf.c_bf_getreadbuffer(py_obj, 0, ref) == 10
+ assert generic_cpy_call(space, c_buf.c_bf_getreadbuffer,
+ py_obj, 0, ref) == 10
lltype.free(ref, flavor='raw')
Py_DecRef(space, py_obj)
@@ -359,6 +363,7 @@
assert space.str_w(from_ref(space, ptr[0])) == 'abcdef'
api.PyString_Concat(ptr, space.w_None)
assert not ptr[0]
+ api.PyErr_Clear()
ptr[0] = lltype.nullptr(PyObject.TO)
api.PyString_Concat(ptr, space.wrap('def')) # should not crash
lltype.free(ptr, flavor='raw')
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -254,13 +254,15 @@
class AppTestCpythonExtensionBase(LeakCheckingTest):
def setup_class(cls):
- cls.space.getbuiltinmodule("cpyext")
- from pypy.module.imp.importing import importhook
- importhook(cls.space, "os") # warm up reference counts
+ space = cls.space
+ space.getbuiltinmodule("cpyext")
+ # 'import os' to warm up reference counts
+ w_import = space.builtin.getdictvalue(space, '__import__')
+ space.call_function(w_import, space.wrap("os"))
#state = cls.space.fromcache(RefcountState) ZZZ
#state.non_heaptypes_w[:] = []
if not cls.runappdirect:
- cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
+ cls.w_runappdirect = space.wrap(cls.runappdirect)
def setup_method(self, func):
@gateway.unwrap_spec(name=str)
diff --git a/pypy/module/cpyext/test/test_datetime.py b/pypy/module/cpyext/test/test_datetime.py
--- a/pypy/module/cpyext/test/test_datetime.py
+++ b/pypy/module/cpyext/test/test_datetime.py
@@ -72,6 +72,16 @@
date = datetime.datetime.fromtimestamp(0)
assert space.unwrap(space.str(w_date)) == str(date)
+ def test_tzinfo(self, space, api):
+ w_tzinfo = space.appexec(
+ [], """():
+ from datetime import tzinfo
+ return tzinfo()
+ """)
+ assert api.PyTZInfo_Check(w_tzinfo)
+ assert api.PyTZInfo_CheckExact(w_tzinfo)
+ assert not api.PyTZInfo_Check(space.w_None)
+
class AppTestDatetime(AppTestCpythonExtensionBase):
def test_CAPI(self):
module = self.import_extension('foo', [
@@ -82,11 +92,12 @@
PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI");
return NULL;
}
- return PyTuple_Pack(4,
+ return PyTuple_Pack(5,
PyDateTimeAPI->DateType,
PyDateTimeAPI->DateTimeType,
PyDateTimeAPI->TimeType,
- PyDateTimeAPI->DeltaType);
+ PyDateTimeAPI->DeltaType,
+ PyDateTimeAPI->TZInfoType);
"""),
("clear_types", "METH_NOARGS",
"""
@@ -94,13 +105,15 @@
Py_DECREF(PyDateTimeAPI->DateTimeType);
Py_DECREF(PyDateTimeAPI->TimeType);
Py_DECREF(PyDateTimeAPI->DeltaType);
+ Py_DECREF(PyDateTimeAPI->TZInfoType);
Py_RETURN_NONE;
"""
)
- ])
+ ], prologue='#include "datetime.h"\n')
import datetime
assert module.get_types() == (datetime.date,
datetime.datetime,
datetime.time,
- datetime.timedelta)
+ datetime.timedelta,
+ datetime.tzinfo)
module.clear_types()
diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py
--- a/pypy/module/cpyext/test/test_dictobject.py
+++ b/pypy/module/cpyext/test/test_dictobject.py
@@ -181,6 +181,7 @@
if (!PyArg_ParseTuple(args, "O", &dict))
return NULL;
proxydict = PyDictProxy_New(dict);
+#ifdef PYPY_VERSION // PyDictProxy_Check[Exact] are PyPy-specific.
if (!PyDictProxy_Check(proxydict)) {
Py_DECREF(proxydict);
PyErr_SetNone(PyExc_ValueError);
@@ -191,6 +192,7 @@
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
+#endif // PYPY_VERSION
i = PyObject_Size(proxydict);
Py_DECREF(proxydict);
return PyLong_FromLong(i);
diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py
--- a/pypy/module/cpyext/test/test_listobject.py
+++ b/pypy/module/cpyext/test/test_listobject.py
@@ -141,13 +141,14 @@
module = self.import_extension('foo', [
("test_get_item", "METH_NOARGS",
"""
- PyObject* o = PyList_New(1);
+ PyObject* o, *o2, *o3;
+ o = PyList_New(1);
- PyObject* o2 = PyInt_FromLong(0);
+ o2 = PyInt_FromLong(0);
PyList_SET_ITEM(o, 0, o2);
o2 = NULL;
- PyObject* o3 = PyList_GET_ITEM(o, 0);
+ o3 = PyList_GET_ITEM(o, 0);
Py_INCREF(o3);
Py_CLEAR(o);
return o3;
@@ -161,16 +162,17 @@
"""
PyObject* o = PyList_New(0);
PyObject* o2 = PyList_New(0);
+ Py_ssize_t refcount, new_refcount;
PyList_Append(o, o2); // does not steal o2
- Py_ssize_t refcount = Py_REFCNT(o2);
+ refcount = Py_REFCNT(o2);
// Steal a reference to o2, but leak the old reference to o2.
// The net result should be no change in refcount.
PyList_SET_ITEM(o, 0, o2);
- Py_ssize_t new_refcount = Py_REFCNT(o2);
+ new_refcount = Py_REFCNT(o2);
Py_CLEAR(o);
Py_DECREF(o2); // append incref'd.
diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py
--- a/pypy/module/cpyext/test/test_ndarrayobject.py
+++ b/pypy/module/cpyext/test/test_ndarrayobject.py
@@ -366,7 +366,7 @@
def test_ufunc(self):
if self.runappdirect:
from numpy import arange
- py.test.xfail('why does this segfault on cpython?')
+ py.test.xfail('segfaults on cpython: PyUFunc_API == NULL?')
else:
from _numpypy.multiarray import arange
mod = self.import_extension('foo', [
diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
--- a/pypy/module/cpyext/test/test_pyerrors.py
+++ b/pypy/module/cpyext/test/test_pyerrors.py
@@ -365,6 +365,8 @@
assert "in test_PyErr_Display\n" in output
assert "ZeroDivisionError" in output
+ @pytest.mark.skipif(True, reason=
+ "XXX seems to pass, but doesn't: 'py.test -s' shows errors in PyObject_Free")
def test_GetSetExcInfo(self):
import sys
if self.runappdirect and (sys.version_info.major < 3 or
diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py
--- a/pypy/module/cpyext/test/test_pystate.py
+++ b/pypy/module/cpyext/test/test_pystate.py
@@ -118,12 +118,13 @@
module = self.import_extension('foo', [
("bounce", "METH_NOARGS",
"""
+ PyThreadState * tstate;
if (PyEval_ThreadsInitialized() == 0)
{
PyEval_InitThreads();
}
PyGILState_Ensure();
- PyThreadState *tstate = PyEval_SaveThread();
+ tstate = PyEval_SaveThread();
if (tstate == NULL) {
return PyLong_FromLong(0);
}
diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py
--- a/pypy/module/cpyext/test/test_thread.py
+++ b/pypy/module/cpyext/test/test_thread.py
@@ -1,9 +1,12 @@
-import py
+import sys
+
+import py, pytest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
class AppTestThread(AppTestCpythonExtensionBase):
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_get_thread_ident(self):
module = self.import_extension('foo', [
("get_thread_ident", "METH_NOARGS",
@@ -30,6 +33,7 @@
assert results[0][0] != results[1][0]
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_acquire_lock(self):
module = self.import_extension('foo', [
("test_acquire_lock", "METH_NOARGS",
@@ -53,13 +57,14 @@
])
module.test_acquire_lock()
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_release_lock(self):
module = self.import_extension('foo', [
("test_release_lock", "METH_NOARGS",
"""
#ifndef PyThread_release_lock
#error "seems we are not accessing PyPy's functions"
-#endif
+#endif
PyThread_type_lock lock = PyThread_allocate_lock();
PyThread_acquire_lock(lock, 1);
PyThread_release_lock(lock);
@@ -74,6 +79,7 @@
])
module.test_release_lock()
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
def test_tls(self):
module = self.import_extension('foo', [
("create_key", "METH_NOARGS",
diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py
--- a/pypy/module/cpyext/test/test_tupleobject.py
+++ b/pypy/module/cpyext/test/test_tupleobject.py
@@ -84,7 +84,14 @@
"""
PyObject *item = PyTuple_New(0);
PyObject *t = PyTuple_New(1);
- if (t->ob_refcnt != 1 || item->ob_refcnt != 1) {
+#ifdef PYPY_VERSION
+ // PyPy starts even empty tuples with a refcount of 1.
+ const int initial_item_refcount = 1;
+#else
+ // CPython can cache ().
+ const int initial_item_refcount = item->ob_refcnt;
+#endif // PYPY_VERSION
+ if (t->ob_refcnt != 1 || item->ob_refcnt != initial_item_refcount) {
PyErr_SetString(PyExc_SystemError, "bad initial refcnt");
return NULL;
}
@@ -94,8 +101,8 @@
PyErr_SetString(PyExc_SystemError, "SetItem: t refcnt != 1");
return NULL;
}
- if (item->ob_refcnt != 1) {
- PyErr_SetString(PyExc_SystemError, "SetItem: item refcnt != 1");
+ if (item->ob_refcnt != initial_item_refcount) {
+ PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != initial_item_refcount");
return NULL;
}
@@ -109,8 +116,8 @@
PyErr_SetString(PyExc_SystemError, "GetItem: t refcnt != 1");
return NULL;
}
- if (item->ob_refcnt != 1) {
- PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != 1");
+ if (item->ob_refcnt != initial_item_refcount) {
+ PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != initial_item_refcount");
return NULL;
}
return t;
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -24,8 +24,11 @@
if(PyUnicode_GetSize(s) != 11) {
result = -PyUnicode_GetSize(s);
}
+#ifdef PYPY_VERSION
+ // Slightly silly test that tp_basicsize is reasonable.
if(s->ob_type->tp_basicsize != sizeof(void*)*7)
result = s->ob_type->tp_basicsize;
+#endif // PYPY_VERSION
Py_DECREF(s);
return PyLong_FromLong(result);
"""),
@@ -85,8 +88,11 @@
'''
),
])
- res = module.test_hash(u"xyz")
- assert res == hash(u'xyz')
+ obj = u'xyz'
+ # CPython in particular does not precompute ->hash, so we need to call
+ # hash() first.
+ expected_hash = hash(obj)
+ assert module.test_hash(obj) == expected_hash
def test_default_encoded_string(self):
module = self.import_extension('foo', [
diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py
--- a/pypy/module/cpyext/test/test_version.py
+++ b/pypy/module/cpyext/test/test_version.py
@@ -1,4 +1,6 @@
-import py
+import sys
+
+import py, pytest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
@@ -22,8 +24,6 @@
PyModule_AddIntConstant(m, "py_major_version", PY_MAJOR_VERSION);
PyModule_AddIntConstant(m, "py_minor_version", PY_MINOR_VERSION);
PyModule_AddIntConstant(m, "py_micro_version", PY_MICRO_VERSION);
- PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION);
- PyModule_AddIntConstant(m, "pypy_version_num", PYPY_VERSION_NUM);
}
"""
module = self.import_module(name='foo', init=init)
@@ -31,6 +31,18 @@
assert module.py_major_version == sys.version_info.major
assert module.py_minor_version == sys.version_info.minor
assert module.py_micro_version == sys.version_info.micro
+
+ @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
+ def test_pypy_versions(self):
+ import sys
+ init = """
+ if (Py_IsInitialized()) {
+ PyObject *m = Py_InitModule("foo", NULL);
+ PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION);
+ PyModule_AddIntConstant(m, "pypy_version_num", PYPY_VERSION_NUM);
+ }
+ """
+ module = self.import_module(name='foo', init=init)
v = sys.pypy_version_info
s = '%d.%d.%d' % (v[0], v[1], v[2])
if v.releaselevel != 'final':
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -127,7 +127,7 @@
#_______________________________________________________________________
- at cpython_api([Py_ssize_t], PyObject)
+ at cpython_api([Py_ssize_t], PyObject, result_is_ll=True)
def PyTuple_New(space, size):
return rffi.cast(PyObject, new_empty_tuple(space, size))
@@ -150,7 +150,8 @@
decref(space, old_ref)
return 0
- at cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True)
+ at cpython_api([PyObject, Py_ssize_t], PyObject,
+ result_borrowed=True, result_is_ll=True)
def PyTuple_GetItem(space, ref, index):
if not tuple_check_ref(space, ref):
PyErr_BadInternalCall(space)
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -752,7 +752,7 @@
w_type2 = from_ref(space, rffi.cast(PyObject, b))
return int(abstract_issubclass_w(space, w_type1, w_type2)) #XXX correct?
- at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject)
+ at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True)
def PyType_GenericAlloc(space, type, nitems):
from pypy.module.cpyext.object import _PyObject_NewVar
return _PyObject_NewVar(space, type, nitems)
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -328,7 +328,7 @@
return unicodeobject.encode_object(space, w_unicode, 'unicode-escape', 'strict')
- at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyUnicode_FromUnicode(space, wchar_p, length):
"""Create a Unicode Object from the Py_UNICODE buffer u of the given size. u
may be NULL which causes the contents to be undefined. It is the user's
@@ -342,14 +342,14 @@
else:
return rffi.cast(PyObject, new_empty_unicode(space, length))
- at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyUnicode_FromWideChar(space, wchar_p, length):
"""Create a Unicode object from the wchar_t buffer w of the given size.
Return NULL on failure."""
# PyPy supposes Py_UNICODE == wchar_t
return PyUnicode_FromUnicode(space, wchar_p, length)
- at cpython_api([PyObject, CONST_STRING], PyObject)
+ at cpython_api([PyObject, CONST_STRING], PyObject, result_is_ll=True)
def _PyUnicode_AsDefaultEncodedString(space, ref, errors):
# Returns a borrowed reference.
py_uni = rffi.cast(PyUnicodeObject, ref)
@@ -430,7 +430,7 @@
w_str = space.wrap(rffi.charp2str(s))
return space.call_method(w_str, 'decode', space.wrap("utf-8"))
- at cpython_api([CONST_STRING, Py_ssize_t], PyObject)
+ at cpython_api([CONST_STRING, Py_ssize_t], PyObject, result_is_ll=True)
def PyUnicode_FromStringAndSize(space, s, size):
"""Create a Unicode Object from the char buffer u. The bytes will be
interpreted as being UTF-8 encoded. u may also be NULL which causes the
diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py
--- a/pypy/module/operator/app_operator.py
+++ b/pypy/module/operator/app_operator.py
@@ -5,7 +5,6 @@
equivalent to x+y.
'''
-import types
import __pypy__
@@ -73,16 +72,14 @@
class attrgetter(object):
def __init__(self, attr, *attrs):
- if (
- not isinstance(attr, basestring) or
- not all(isinstance(a, basestring) for a in attrs)
- ):
- def _raise_typeerror(obj):
- raise TypeError(
- "argument must be a string, not %r" % type(attr).__name__
- )
- self._call = _raise_typeerror
- elif attrs:
+ if not isinstance(attr, basestring):
+ self._error(attr)
+ return
+ if attrs:
+ for a in attrs:
+ if not isinstance(a, basestring):
+ self._error(a)
+ return
self._multi_attrs = [
a.split(".") for a in [attr] + list(attrs)
]
@@ -94,6 +91,13 @@
self._single_attr = attr.split(".")
self._call = self._single_attrgetter
+ def _error(self, attr):
+ def _raise_typeerror(obj):
+ raise TypeError(
+ "attribute name must be a string, not %r" % type(attr).__name__
+ )
+ self._call = _raise_typeerror
+
def __call__(self, obj):
return self._call(obj)
diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py
--- a/pypy/module/operator/test/test_operator.py
+++ b/pypy/module/operator/test/test_operator.py
@@ -33,7 +33,8 @@
a.z = 'Z'
assert operator.attrgetter('x','z','y')(a) == ('X', 'Z', 'Y')
- raises(TypeError, operator.attrgetter('x', (), 'y'), a)
+ e = raises(TypeError, operator.attrgetter('x', (), 'y'), a)
+ assert str(e.value) == "attribute name must be a string, not 'tuple'"
data = map(str, range(20))
assert operator.itemgetter(2,10,5)(data) == ('2', '10', '5')
diff --git a/pypy/module/unicodedata/interp_ucd.py b/pypy/module/unicodedata/interp_ucd.py
--- a/pypy/module/unicodedata/interp_ucd.py
+++ b/pypy/module/unicodedata/interp_ucd.py
@@ -4,7 +4,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
from rpython.rlib.rarithmetic import r_longlong
from rpython.rlib.objectmodel import we_are_translated
@@ -34,8 +34,9 @@
# Target is wide build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode == 0xFFFF:
# Host CPython is narrow build, accept surrogates
@@ -54,8 +55,9 @@
# Target is narrow build
def unichr_to_code_w(space, w_unichr):
if not space.isinstance_w(w_unichr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- 'argument 1 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 1 must be unicode, not %T',
+ w_unichr)
if not we_are_translated() and sys.maxunicode > 0xFFFF:
# Host CPython is wide build, forbid surrogates
@@ -179,7 +181,9 @@
@unwrap_spec(form=str)
def normalize(self, space, form, w_unistr):
if not space.isinstance_w(w_unistr, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap('argument 2 must be unicode'))
+ raise oefmt(
+ space.w_TypeError, 'argument 2 must be unicode, not %T',
+ w_unistr)
if form == 'NFC':
composed = True
decomposition = self._canon_decomposition
diff --git a/pypy/module/unicodedata/test/test_unicodedata.py b/pypy/module/unicodedata/test/test_unicodedata.py
--- a/pypy/module/unicodedata/test/test_unicodedata.py
+++ b/pypy/module/unicodedata/test/test_unicodedata.py
@@ -78,10 +78,15 @@
import unicodedata
assert unicodedata.lookup("GOTHIC LETTER FAIHU") == u'\U00010346'
- def test_normalize(self):
+ def test_normalize_bad_argcount(self):
import unicodedata
raises(TypeError, unicodedata.normalize, 'x')
+ def test_normalize_nonunicode(self):
+ import unicodedata
+ exc_info = raises(TypeError, unicodedata.normalize, 'NFC', 'x')
+ assert str(exc_info.value).endswith('must be unicode, not str')
+
@py.test.mark.skipif("sys.maxunicode < 0x10ffff")
def test_normalize_wide(self):
import unicodedata
@@ -103,6 +108,12 @@
# For no reason, unicodedata.mirrored() returns an int, not a bool
assert repr(unicodedata.mirrored(u' ')) == '0'
- def test_bidirectional(self):
+ def test_bidirectional_not_one_character(self):
import unicodedata
- raises(TypeError, unicodedata.bidirectional, u'xx')
+ exc_info = raises(TypeError, unicodedata.bidirectional, u'xx')
+ assert str(exc_info.value) == 'need a single Unicode character as parameter'
+
+ def test_bidirectional_not_one_character(self):
+ import unicodedata
+ exc_info = raises(TypeError, unicodedata.bidirectional, 'x')
+ assert str(exc_info.value).endswith('must be unicode, not str')
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -277,7 +277,7 @@
def copy(self, obj):
result = Object()
result.space = self.space
- result._init_empty(self)
+ result._mapdict_init_empty(self)
return result
def length(self):
@@ -286,7 +286,7 @@
def set_terminator(self, obj, terminator):
result = Object()
result.space = self.space
- result._init_empty(terminator)
+ result._mapdict_init_empty(terminator)
return result
def remove_dict_entries(self, obj):
@@ -304,7 +304,7 @@
def materialize_r_dict(self, space, obj, dict_w):
result = Object()
result.space = space
- result._init_empty(self.devolved_dict_terminator)
+ result._mapdict_init_empty(self.devolved_dict_terminator)
return result
@@ -417,11 +417,6 @@
def __repr__(self):
return "" % (self.name, self.index, self.storageindex, self.back)
-def _become(w_obj, new_obj):
- # this is like the _become method, really, but we cannot use that due to
- # RPython reasons
- w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
-
class MapAttrCache(object):
def __init__(self, space):
SIZE = 1 << space.config.objspace.std.methodcachesizeexp
@@ -457,22 +452,12 @@
# everything that's needed to use mapdict for a user subclass at all.
# This immediately makes slots possible.
- # assumes presence of _init_empty, _mapdict_read_storage,
+ # assumes presence of _get_mapdict_map, _set_mapdict_map
+ # _mapdict_init_empty, _mapdict_read_storage,
# _mapdict_write_storage, _mapdict_storage_length,
# _set_mapdict_storage_and_map
# _____________________________________________
- # methods needed for mapdict
-
- def _become(self, new_obj):
- self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
-
- def _get_mapdict_map(self):
- return jit.promote(self.map)
- def _set_mapdict_map(self, map):
- self.map = map
-
- # _____________________________________________
# objspace interface
# class access
@@ -482,15 +467,14 @@
def setclass(self, space, w_cls):
new_obj = self._get_mapdict_map().set_terminator(self, w_cls.terminator)
- self._become(new_obj)
+ self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
def user_setup(self, space, w_subtype):
from pypy.module.__builtin__.interp_classobj import W_InstanceObject
- self.space = space
assert (not self.typedef.hasdict or
isinstance(w_subtype.terminator, NoDictTerminator) or
self.typedef is W_InstanceObject.typedef)
- self._init_empty(w_subtype.terminator)
+ self._mapdict_init_empty(w_subtype.terminator)
# methods needed for slots
@@ -508,7 +492,7 @@
new_obj = self._get_mapdict_map().delete(self, "slot", index)
if new_obj is None:
return False
- self._become(new_obj)
+ self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
return True
@@ -549,7 +533,7 @@
new_obj = self._get_mapdict_map().delete(self, attrname, DICT)
if new_obj is None:
return False
- self._become(new_obj)
+ self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
return True
def getdict(self, space):
@@ -599,7 +583,12 @@
assert flag
class MapdictStorageMixin(object):
- def _init_empty(self, map):
+ def _get_mapdict_map(self):
+ return jit.promote(self.map)
+ def _set_mapdict_map(self, map):
+ self.map = map
+
+ def _mapdict_init_empty(self, map):
from rpython.rlib.debug import make_sure_not_resized
self.map = map
self.storage = make_sure_not_resized([None] * map.size_estimate())
@@ -613,6 +602,7 @@
def _mapdict_storage_length(self):
return len(self.storage)
+
def _set_mapdict_storage_and_map(self, storage, map):
self.storage = storage
self.map = map
@@ -643,7 +633,11 @@
rangenmin1 = unroll.unrolling_iterable(range(nmin1))
valnmin1 = "_value%s" % nmin1
class subcls(object):
- def _init_empty(self, map):
+ def _get_mapdict_map(self):
+ return jit.promote(self.map)
+ def _set_mapdict_map(self, map):
+ self.map = map
+ def _mapdict_init_empty(self, map):
for i in rangenmin1:
setattr(self, "_value%s" % i, None)
setattr(self, valnmin1, erase_item(None))
@@ -731,7 +725,7 @@
def get_empty_storage(self):
w_result = Object()
terminator = self.space.fromcache(get_terminator_for_dicts)
- w_result._init_empty(terminator)
+ w_result._mapdict_init_empty(terminator)
return self.erase(w_result)
def switch_to_object_strategy(self, w_dict):
@@ -811,7 +805,7 @@
def clear(self, w_dict):
w_obj = self.unerase(w_dict.dstorage)
new_obj = w_obj._get_mapdict_map().remove_dict_entries(w_obj)
- _become(w_obj, new_obj)
+ w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
def popitem(self, w_dict):
curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT)
@@ -836,7 +830,7 @@
def materialize_r_dict(space, obj, dict_w):
map = obj._get_mapdict_map()
new_obj = map.materialize_r_dict(space, obj, dict_w)
- _become(obj, new_obj)
+ obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
class MapDictIteratorKeys(BaseKeyIterator):
def __init__(self, space, strategy, dictimplementation):
diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py
--- a/pypy/objspace/std/newformat.py
+++ b/pypy/objspace/std/newformat.py
@@ -560,7 +560,7 @@
msg = "Sign not allowed in string format specifier"
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._alternate:
- msg = "Alternate form not allowed in string format specifier"
+ msg = "Alternate form (#) not allowed in string format specifier"
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._align == "=":
msg = "'=' alignment not allowed in string format specifier"
@@ -920,7 +920,7 @@
flags = 0
default_precision = 6
if self._alternate:
- msg = "alternate form not allowed in float formats"
+ msg = "Alternate form (#) not allowed in float formats"
raise OperationError(space.w_ValueError, space.wrap(msg))
tp = self._type
self._get_locale(tp)
@@ -998,9 +998,9 @@
raise OperationError(space.w_ValueError, space.wrap(msg))
if self._alternate:
#alternate is invalid
- msg = "Alternate form %s not allowed in complex format specifier"
+ msg = "Alternate form (#) not allowed in complex format specifier"
raise OperationError(space.w_ValueError,
- space.wrap(msg % (self._alternate)))
+ space.wrap(msg))
skip_re = 0
add_parens = 0
if tp == "\0":
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -358,7 +358,7 @@
cls = cls.typedef.applevel_subclasses_base
#
subcls = get_unique_interplevel_subclass(
- self.config, cls, w_subtype.needsdel)
+ self, cls, w_subtype.needsdel)
instance = instantiate(subcls)
assert isinstance(instance, cls)
instance.user_setup(self, w_subtype)
diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
--- a/pypy/tool/release/repackage.sh
+++ b/pypy/tool/release/repackage.sh
@@ -1,7 +1,7 @@
# Edit these appropriately before running this script
maj=5
min=1
-rev=0
+rev=1
branchname=release-$maj.x # ==OR== release-$maj.$min.x
tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev
diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py
--- a/rpython/rtyper/lltypesystem/ll2ctypes.py
+++ b/rpython/rtyper/lltypesystem/ll2ctypes.py
@@ -231,17 +231,7 @@
assert max_n >= 0
ITEM = A.OF
ctypes_item = get_ctypes_type(ITEM, delayed_builders)
- # Python 2.5 ctypes can raise OverflowError on 64-bit builds
- for n in [maxint, 2**31]:
- MAX_SIZE = n/64
- try:
- PtrType = ctypes.POINTER(MAX_SIZE * ctypes_item)
- except (OverflowError, AttributeError), e:
- pass # ^^^ bah, blame ctypes
- else:
- break
- else:
- raise e
+ ctypes_item_ptr = ctypes.POINTER(ctypes_item)
class CArray(ctypes.Structure):
if is_emulated_long:
@@ -265,35 +255,9 @@
bigarray.length = n
return bigarray
- _ptrtype = None
-
- @classmethod
- def _get_ptrtype(cls):
- if cls._ptrtype:
- return cls._ptrtype
- # ctypes can raise OverflowError on 64-bit builds
- # on windows it raises AttributeError even for 2**31 (_length_ missing)
- if _MS_WINDOWS:
- other_limit = 2**31-1
- else:
- other_limit = 2**31
- for n in [maxint, other_limit]:
- cls.MAX_SIZE = n / ctypes.sizeof(ctypes_item)
- try:
- cls._ptrtype = ctypes.POINTER(cls.MAX_SIZE * ctypes_item)
- except (OverflowError, AttributeError), e:
- pass
- else:
- break
- else:
- raise e
- return cls._ptrtype
-
def _indexable(self, index):
- PtrType = self._get_ptrtype()
- assert index + 1 < self.MAX_SIZE
- p = ctypes.cast(ctypes.pointer(self.items), PtrType)
- return p.contents
+ p = ctypes.cast(self.items, ctypes_item_ptr)
+ return p
def _getitem(self, index, boundscheck=True):
if boundscheck:
@@ -1045,12 +1009,22 @@
container = _array_of_known_length(T.TO)
container._storage = type(cobj)(cobj.contents)
elif isinstance(T.TO, lltype.FuncType):
+ # cobj is a CFunctionType object. We naively think
+ # that it should be a function pointer. No no no. If
+ # it was read out of an array, say, then it is a *pointer*
+ # to a function pointer. In other words, the read doesn't
+ # read anything, it just takes the address of the function
+ # pointer inside the array. If later the array is modified
+ # or goes out of scope, then we crash. CTypes is fun.
+ # It works if we cast it now to an int and back.
cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value)
if cobjkey in _int2obj:
container = _int2obj[cobjkey]
else:
+ name = getattr(cobj, '__name__', '?')
+ cobj = ctypes.cast(cobjkey, type(cobj))
_callable = get_ctypes_trampoline(T.TO, cobj)
- return lltype.functionptr(T.TO, getattr(cobj, '__name__', '?'),
+ return lltype.functionptr(T.TO, name,
_callable=_callable)
elif isinstance(T.TO, lltype.OpaqueType):
if T == llmemory.GCREF:
diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py
--- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py
+++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py
@@ -1405,6 +1405,45 @@
a2 = ctypes2lltype(lltype.Ptr(A), lltype2ctypes(a))
assert a2._obj.getitem(0)._obj._parentstructure() is a2._obj
+ def test_array_of_function_pointers(self):
+ c_source = py.code.Source(r"""
+ #include "src/precommondefs.h"
+ #include
+
From pypy.commits at gmail.com Mon May 2 15:58:49 2016
From: pypy.commits at gmail.com (mattip)
Date: Mon, 02 May 2016 12:58:49 -0700 (PDT)
Subject: [pypy-commit] pypy default: remove DEBUG_REFCOUNT,
which completes TODO
Message-ID: <5727b179.634fc20a.61a20.77c6@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84146:089032a1e454
Date: 2016-05-02 14:01 +0300
http://bitbucket.org/pypy/pypy/changeset/089032a1e454/
Log: remove DEBUG_REFCOUNT, which completes TODO
diff --git a/TODO b/TODO
deleted file mode 100644
--- a/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-* reduce size of generated c code from slot definitions in slotdefs.
-* remove broken DEBUG_REFCOUNT from pyobject.py
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -152,17 +152,6 @@
class InvalidPointerException(Exception):
pass
-DEBUG_REFCOUNT = False
-
-def debug_refcount(*args, **kwargs):
- frame_stackdepth = kwargs.pop("frame_stackdepth", 2)
- assert not kwargs
- frame = sys._getframe(frame_stackdepth)
- print >>sys.stderr, "%25s" % (frame.f_code.co_name, ),
- for arg in args:
- print >>sys.stderr, arg,
- print >>sys.stderr
-
def create_ref(space, w_obj, itemcount=0):
"""
Allocates a PyObject, and fills its fields with info from the given
@@ -192,10 +181,6 @@
# XXX looks like a PyObject_GC_TRACK
assert py_obj.c_ob_refcnt < rawrefcount.REFCNT_FROM_PYPY
py_obj.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY
- if DEBUG_REFCOUNT:
- debug_refcount("MAKREF", py_obj, w_obj)
- assert w_obj
- assert py_obj
rawrefcount.create_link_pypy(w_obj, py_obj)
From pypy.commits at gmail.com Mon May 2 16:02:49 2016
From: pypy.commits at gmail.com (mattip)
Date: Mon, 02 May 2016 13:02:49 -0700 (PDT)
Subject: [pypy-commit] pypy default: add release note
Message-ID: <5727b269.a272c20a.cb4c7.7ec6@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84147:44a31ad5303c
Date: 2016-05-02 23:01 +0300
http://bitbucket.org/pypy/pypy/changeset/44a31ad5303c/
Log: add release note
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.1.1.rst
release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
From pypy.commits at gmail.com Mon May 2 18:41:04 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Mon, 02 May 2016 15:41:04 -0700 (PDT)
Subject: [pypy-commit] pypy gc-forkfriendly: Oops. Put more cases of using
.tid behind possible pointer indirection.
Message-ID: <5727d780.2413c30a.5f78b.1e32@mx.google.com>
Author: Devin Jeanpierre
Branch: gc-forkfriendly
Changeset: r84149:fb71d0056319
Date: 2016-05-02 15:39 -0700
http://bitbucket.org/pypy/pypy/changeset/fb71d0056319/
Log: Oops. Put more cases of using .tid behind possible pointer
indirection.
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1083,7 +1083,7 @@
# Simple helpers
def get_type_id(self, obj):
- tid = self.get_flags(obj)
+ tid = self.header(obj).tid
return llop.extract_ushort(llgroup.HALFWORD, tid)
def combine(self, typeid16, flags):
@@ -1384,14 +1384,13 @@
# 'newvalue'-less version, too. Moreover, the incremental
# GC nowadays relies on this fact.
self.old_objects_pointing_to_young.append(addr_struct)
- objhdr = self.header(addr_struct)
- objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
+ self.remove_flags(addr_struct, GCFLAG_TRACK_YOUNG_PTRS)
#
# Second part: if 'addr_struct' is actually a prebuilt GC
# object and it's the first time we see a write to it, we
# add it to the list 'prebuilt_root_objects'.
- if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
- objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
+ if self.get_flags(addr_struct) & GCFLAG_NO_HEAP_PTRS:
+ self.remove_flags(addr_struct, GCFLAG_NO_HEAP_PTRS)
self.prebuilt_root_objects.append(addr_struct)
remember_young_pointer._dont_inline_ = True
@@ -1409,8 +1408,7 @@
# item that is (or contains) the pointer that we write.
# We know that 'addr_array' has GCFLAG_TRACK_YOUNG_PTRS so far.
#
- objhdr = self.header(addr_array)
- if objhdr.tid & GCFLAG_HAS_CARDS == 0:
+ if self.get_flags(addr_array) & GCFLAG_HAS_CARDS == 0:
#
if DEBUG: # note: PYPY_GC_DEBUG=1 does not enable this
ll_assert(self.debug_is_old_object(addr_array),
@@ -1418,9 +1416,9 @@
#
# no cards, use default logic. Mostly copied from above.
self.old_objects_pointing_to_young.append(addr_array)
- objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
- if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
- objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
+ self.remove_flags(addr_array, GCFLAG_TRACK_YOUNG_PTRS)
+ if self.get_flags(addr_array) & GCFLAG_NO_HEAP_PTRS:
+ self.remove_flags(addr_array, GCFLAG_NO_HEAP_PTRS)
self.prebuilt_root_objects.append(addr_array)
return
#
@@ -1442,9 +1440,9 @@
# does not take 3 arguments).
addr_byte.char[0] = chr(byte | bitmask)
#
- if objhdr.tid & GCFLAG_CARDS_SET == 0:
+ if self.get_flags(addr_array) & GCFLAG_CARDS_SET == 0:
self.old_objects_with_cards_set.append(addr_array)
- objhdr.tid |= GCFLAG_CARDS_SET
+ self.add_flags(addr_array, GCFLAG_CARDS_SET)
remember_young_pointer_from_array2._dont_inline_ = True
assert self.card_page_indices > 0
@@ -1457,10 +1455,9 @@
# but GCFLAG_CARDS_SET is cleared. This tries to set
# GCFLAG_CARDS_SET if possible; otherwise, it falls back
# to remember_young_pointer().
- objhdr = self.header(addr_array)
- if objhdr.tid & GCFLAG_HAS_CARDS:
+ if self.get_flags(addr_array) & GCFLAG_HAS_CARDS:
self.old_objects_with_cards_set.append(addr_array)
- objhdr.tid |= GCFLAG_CARDS_SET
+ self.add_flags(addr_array, GCFLAG_CARDS_SET)
else:
self.remember_young_pointer(addr_array)
@@ -1493,22 +1490,22 @@
#
source_hdr = self.header(source_addr)
dest_hdr = self.header(dest_addr)
- if dest_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
+ if self.get_flags(dest_addr) & GCFLAG_TRACK_YOUNG_PTRS == 0:
return True
# ^^^ a fast path of write-barrier
#
- if source_hdr.tid & GCFLAG_HAS_CARDS != 0:
+ if self.get_flags(source_addr) & GCFLAG_HAS_CARDS != 0:
#
- if source_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
+ if self.get_flags(source_addr) & GCFLAG_TRACK_YOUNG_PTRS == 0:
# The source object may have random young pointers.
# Return False to mean "do it manually in ll_arraycopy".
return False
#
- if source_hdr.tid & GCFLAG_CARDS_SET == 0:
+ if self.get_flags(source_addr) & GCFLAG_CARDS_SET == 0:
# The source object has no young pointers at all. Done.
return True
#
- if dest_hdr.tid & GCFLAG_HAS_CARDS == 0:
+ if self.get_flags(dest_addr) & GCFLAG_HAS_CARDS == 0:
# The dest object doesn't have cards. Do it manually.
return False
#
@@ -1519,14 +1516,14 @@
self.manually_copy_card_bits(source_addr, dest_addr, length)
return True
#
- if source_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
+ if self.get_flags(source_addr) & GCFLAG_TRACK_YOUNG_PTRS == 0:
# there might be in source a pointer to a young object
self.old_objects_pointing_to_young.append(dest_addr)
- dest_hdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
+ self.remove_flags(dest_addr, GCFLAG_TRACK_YOUNG_PTRS)
#
- if dest_hdr.tid & GCFLAG_NO_HEAP_PTRS:
- if source_hdr.tid & GCFLAG_NO_HEAP_PTRS == 0:
- dest_hdr.tid &= ~GCFLAG_NO_HEAP_PTRS
+ if self.get_flags(dest_addr) & GCFLAG_NO_HEAP_PTRS:
+ if self.get_flags(source_addr) & GCFLAG_NO_HEAP_PTRS == 0:
+ self.remove_flags(dest_addr, GCFLAG_NO_HEAP_PTRS)
self.prebuilt_root_objects.append(dest_addr)
return True
@@ -1547,9 +1544,9 @@
#
if anybyte:
dest_hdr = self.header(dest_addr)
- if dest_hdr.tid & GCFLAG_CARDS_SET == 0:
+ if self.get_flags(dest_addr) & GCFLAG_CARDS_SET == 0:
self.old_objects_with_cards_set.append(dest_addr)
- dest_hdr.tid |= GCFLAG_CARDS_SET
+ self.add_flags(dest_addr, GCFLAG_CARDS_SET)
def _wb_old_object_pointing_to_pinned(self, obj, ignore):
self.write_barrier(obj)
@@ -1947,7 +1944,6 @@
return
#
elif self._is_pinned(obj):
- hdr = self.header(obj)
#
# track parent of pinned object specially. This mus be done before
# checking for GCFLAG_VISITED: it may be that the same pinned object
@@ -1963,10 +1959,10 @@
self.updated_old_objects_pointing_to_pinned = True
self.set_flags(parent, GCFLAG_PINNED_OBJECT_PARENT_KNOWN)
#
- if hdr.tid & GCFLAG_VISITED:
+ if self.get_flags(obj) & GCFLAG_VISITED:
return
#
- hdr.tid |= GCFLAG_VISITED
+ self.add_flags(obj, GCFLAG_VISITED)
#
self.surviving_pinned_objects.append(
llarena.getfakearenaaddress(obj - size_gc_header))
@@ -2031,10 +2027,9 @@
# a bug in which dying young arrays with card marks would
# still be scanned before being freed, keeping a lot of
# objects unnecessarily alive.
- hdr = self.header(obj)
- if hdr.tid & GCFLAG_VISITED_RMY:
+ if self.get_flags(obj) & GCFLAG_VISITED_RMY:
return
- hdr.tid |= GCFLAG_VISITED_RMY
+ self.add_flags(obj, GCFLAG_VISITED_RMY)
#
# Accounting
size_gc_header = self.gcheaderbuilder.size_gc_header
@@ -2044,12 +2039,12 @@
# we just made 'obj' old, so we need to add it to the correct lists
added_somewhere = False
#
- if hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
+ if self.get_flags(obj) & GCFLAG_TRACK_YOUNG_PTRS == 0:
self.old_objects_pointing_to_young.append(obj)
added_somewhere = True
#
- if hdr.tid & GCFLAG_HAS_CARDS != 0:
- ll_assert(hdr.tid & GCFLAG_CARDS_SET != 0,
+ if self.get_flags(obj) & GCFLAG_HAS_CARDS != 0:
+ ll_assert(self.get_flags(obj) & GCFLAG_CARDS_SET != 0,
"young array: GCFLAG_HAS_CARDS without GCFLAG_CARDS_SET")
self.old_objects_with_cards_set.append(obj)
added_somewhere = True
@@ -2476,19 +2471,19 @@
# flag GCFLAG_PINNED_OBJECT_PARENT_KNOWN is used during minor
# collections and shouldn't be set here either.
#
- hdr = self.header(obj)
- ll_assert((hdr.tid & GCFLAG_PINNED) == 0,
+ flags = self.get_flags(obj)
+ ll_assert((flags & GCFLAG_PINNED) == 0,
"pinned object in 'objects_to_trace'")
ll_assert(not self.is_in_nursery(obj),
"nursery object in 'objects_to_trace'")
- if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS):
+ if flags & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS):
return 0
#
# It's the first time. We set the flag VISITED. The trick is
# to also set TRACK_YOUNG_PTRS here, for the write barrier.
- hdr.tid |= GCFLAG_VISITED | GCFLAG_TRACK_YOUNG_PTRS
+ self.add_flags(obj, GCFLAG_VISITED | GCFLAG_TRACK_YOUNG_PTRS)
- if self.has_gcptr(llop.extract_ushort(llgroup.HALFWORD, hdr.tid)):
+ if self.has_gcptr(self.get_type_id(obj)):
#
# Trace the content of the object and put all objects it references
# into the 'objects_to_trace' list.
@@ -2691,8 +2686,7 @@
def _bump_finalization_state_from_0_to_1(self, obj):
ll_assert(self._finalization_state(obj) == 0,
"unexpected finalization state != 0")
- hdr = self.header(obj)
- hdr.tid |= GCFLAG_FINALIZATION_ORDERING
+ self.add_flags(obj, GCFLAG_FINALIZATION_ORDERING)
def _recursively_bump_finalization_state_from_2_to_3(self, obj):
ll_assert(self._finalization_state(obj) == 2,
@@ -2702,9 +2696,8 @@
pending.append(obj)
while pending.non_empty():
y = pending.pop()
- hdr = self.header(y)
- if hdr.tid & GCFLAG_FINALIZATION_ORDERING: # state 2 ?
- hdr.tid &= ~GCFLAG_FINALIZATION_ORDERING # change to state 3
+ if self.get_flags(y) & GCFLAG_FINALIZATION_ORDERING: # state 2 ?
+ self.remove_flags(y, GCFLAG_FINALIZATION_ORDERING) # change to state 3
self.trace(y, self._append_if_nonnull, pending)
def _recursively_bump_finalization_state_from_1_to_2(self, obj):
@@ -3050,10 +3043,10 @@
return self.header(obj).tid
def set_flags(self, obj, flags):
- self.header(obj).tid=flags
+ self.header(obj).tid = flags
def add_flags(self, obj, flags):
- self.header(obj).tid|=flags
+ self.header(obj).tid |= flags
def remove_flags(self, obj, flags):
- self.header(obj).tid&=~flags
+ self.header(obj).tid &= ~flags
From pypy.commits at gmail.com Mon May 2 18:41:02 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Mon, 02 May 2016 15:41:02 -0700 (PDT)
Subject: [pypy-commit] pypy gc-forkfriendly: hg merge default
Message-ID: <5727d77e.c42e1c0a.ad2b6.ffffc265@mx.google.com>
Author: Devin Jeanpierre
Branch: gc-forkfriendly
Changeset: r84148:eb1b0eb99f2f
Date: 2016-05-02 10:50 -0700
http://bitbucket.org/pypy/pypy/changeset/eb1b0eb99f2f/
Log: hg merge default
diff --git a/rpython/translator/backendopt/test/test_finalizer.py b/rpython/translator/backendopt/test/test_finalizer.py
--- a/rpython/translator/backendopt/test/test_finalizer.py
+++ b/rpython/translator/backendopt/test/test_finalizer.py
@@ -35,31 +35,6 @@
r = self.analyze(f, [])
assert not r
-def test_various_ops():
- from rpython.flowspace.model import SpaceOperation, Constant
-
- X = lltype.Ptr(lltype.GcStruct('X'))
- Z = lltype.Ptr(lltype.Struct('Z'))
- S = lltype.GcStruct('S', ('x', lltype.Signed),
- ('y', X),
- ('z', Z))
- v1 = varoftype(lltype.Bool)
- v2 = varoftype(lltype.Signed)
- f = FinalizerAnalyzer(None)
- r = f.analyze(SpaceOperation('cast_int_to_bool', [v2],
- v1))
- assert not r
- v1 = varoftype(lltype.Ptr(S))
- v2 = varoftype(lltype.Signed)
- v3 = varoftype(X)
- v4 = varoftype(Z)
- assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('x'),
- v2], None))
- assert f.analyze(SpaceOperation('bare_setfield', [v1, Constant('y'),
- v3], None))
- assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('z'),
- v4], None))
-
def test_malloc(self):
S = lltype.GcStruct('S')
@@ -100,6 +75,22 @@
lltype.free(p, flavor='raw')
r = self.analyze(g, [], f, backendopt=True)
+ assert r
+
+ def test_c_call_without_release_gil(self):
+ C = rffi.CArray(lltype.Signed)
+ c = rffi.llexternal('x', [lltype.Ptr(C)], lltype.Signed,
+ releasegil=False)
+
+ def g():
+ p = lltype.malloc(C, 3, flavor='raw')
+ f(p)
+
+ def f(p):
+ c(rffi.ptradd(p, 0))
+ lltype.free(p, flavor='raw')
+
+ r = self.analyze(g, [], f, backendopt=True)
assert not r
def test_chain(self):
@@ -131,3 +122,30 @@
pass
self.analyze(g, []) # did not explode
py.test.raises(FinalizerError, self.analyze, f, [])
+
+
+def test_various_ops():
+ from rpython.flowspace.model import SpaceOperation, Constant
+
+ X = lltype.Ptr(lltype.GcStruct('X'))
+ Z = lltype.Ptr(lltype.Struct('Z'))
+ S = lltype.GcStruct('S', ('x', lltype.Signed),
+ ('y', X),
+ ('z', Z))
+ v1 = varoftype(lltype.Bool)
+ v2 = varoftype(lltype.Signed)
+ f = FinalizerAnalyzer(None)
+ r = f.analyze(SpaceOperation('cast_int_to_bool', [v2],
+ v1))
+ assert not r
+ v1 = varoftype(lltype.Ptr(S))
+ v2 = varoftype(lltype.Signed)
+ v3 = varoftype(X)
+ v4 = varoftype(Z)
+ assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('x'),
+ v2], None))
+ assert f.analyze(SpaceOperation('bare_setfield', [v1, Constant('y'),
+ v3], None))
+ assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('z'),
+ v4], None))
+
From pypy.commits at gmail.com Mon May 2 18:53:45 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 15:53:45 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: merge default
Message-ID: <5727da79.8d1f1c0a.a4361.5296@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84150:2d2225da8be0
Date: 2016-05-02 15:51 -0700
http://bitbucket.org/pypy/pypy/changeset/2d2225da8be0/
Log: merge default
diff too long, truncating to 2000 out of 10370 lines
diff --git a/TODO b/TODO
deleted file mode 100644
--- a/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-* reduce size of generated c code from slot definitions in slotdefs.
-* remove broken DEBUG_REFCOUNT from pyobject.py
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.1.1.rst
release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
help="output format")
options, args = parser.parse_args()
if len(args) != 1:
- raise ValueError, "need exactly one argument"
+ raise ValueError("need exactly one argument")
epsfile = process_dot(py.path.local(args[0]))
if options.format == "ps" or options.format == "eps":
print epsfile.read()
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -63,7 +63,7 @@
## from pypy.interpreter import main, interactive, error
## con = interactive.PyPyConsole(space)
## con.interact()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -71,7 +71,7 @@
finally:
try:
space.finish()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -115,7 +115,7 @@
space.wrap('__import__'))
space.call_function(import_, space.wrap('site'))
return rffi.cast(rffi.INT, 0)
- except OperationError, e:
+ except OperationError as e:
if verbose:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
@@ -167,7 +167,7 @@
sys._pypy_execute_source.append(glob)
exec stmt in glob
""")
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -84,7 +84,7 @@
space = self.space
try:
args_w = space.fixedview(w_stararg)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"argument after * must be a sequence, not %T",
@@ -111,7 +111,7 @@
else:
try:
w_keys = space.call_method(w_starstararg, "keys")
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
raise oefmt(space.w_TypeError,
"argument after ** must be a mapping, not %T",
@@ -134,11 +134,11 @@
"""The simplest argument parsing: get the 'argcount' arguments,
or raise a real ValueError if the length is wrong."""
if self.keywords:
- raise ValueError, "no keyword arguments expected"
+ raise ValueError("no keyword arguments expected")
if len(self.arguments_w) > argcount:
- raise ValueError, "too many arguments (%d expected)" % argcount
+ raise ValueError("too many arguments (%d expected)" % argcount)
elif len(self.arguments_w) < argcount:
- raise ValueError, "not enough arguments (%d expected)" % argcount
+ raise ValueError("not enough arguments (%d expected)" % argcount)
return self.arguments_w
def firstarg(self):
@@ -279,7 +279,7 @@
try:
self._match_signature(w_firstarg,
scope_w, signature, defaults_w, 0)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
return signature.scope_length()
@@ -301,7 +301,7 @@
"""
try:
return self._parse(w_firstarg, signature, defaults_w, blindargs)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
@staticmethod
@@ -352,7 +352,7 @@
for w_key in keys_w:
try:
key = space.str_w(w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError, "keywords must be strings")
if e.match(space, space.w_UnicodeEncodeError):
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -115,16 +115,16 @@
def check_forbidden_name(self, name, node):
try:
misc.check_forbidden_name(name)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error("cannot assign to %s" % (e.name,), node)
def set_context(self, expr, ctx):
"""Set the context of an expression to Store or Del if possible."""
try:
expr.set_context(ctx)
- except ast.UnacceptableExpressionContext, e:
+ except ast.UnacceptableExpressionContext as e:
self.error_ast(e.msg, e.node)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_print_stmt(self, print_node):
@@ -1080,7 +1080,7 @@
return self.space.call_function(tp, w_num_str)
try:
return self.space.call_function(self.space.w_int, w_num_str, w_base)
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(self.space, self.space.w_ValueError):
raise
return self.space.call_function(self.space.w_float, w_num_str)
@@ -1100,7 +1100,7 @@
sub_strings_w = [parsestring.parsestr(space, encoding, atom_node.get_child(i).get_value(),
unicode_literals)
for i in range(atom_node.num_children())]
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(space, space.w_UnicodeError):
raise
# UnicodeError in literal: turn into SyntaxError
diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py
--- a/pypy/interpreter/astcompiler/symtable.py
+++ b/pypy/interpreter/astcompiler/symtable.py
@@ -325,7 +325,7 @@
try:
module.walkabout(self)
top.finalize(None, {}, {})
- except SyntaxError, e:
+ except SyntaxError as e:
e.filename = compile_info.filename
raise
self.pop_scope()
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -705,7 +705,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
@@ -717,7 +717,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
@@ -969,7 +969,7 @@
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
- except AssertionError, e:
+ except AssertionError as e:
assert str(e) == "(3,)"
# BUILD_LIST_FROM_ARG is PyPy specific
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -96,7 +96,7 @@
def t_default(self, s):
r" . +"
- raise ValueError, "unmatched input: %s" % `s`
+ raise ValueError("unmatched input: %s" % `s`)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
@@ -377,7 +377,7 @@
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
- except ASDLSyntaxError, err:
+ except ASDLSyntaxError as err:
print err
lines = buf.split("\n")
print lines[err.lineno - 1] # lines starts at 0, files at 1
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -52,7 +52,7 @@
try:
space.delitem(w_dict, space.wrap(attr))
return True
- except OperationError, ex:
+ except OperationError as ex:
if not ex.match(space, space.w_KeyError):
raise
return False
@@ -77,7 +77,7 @@
def getname(self, space):
try:
return space.str_w(space.getattr(self, space.wrap('__name__')))
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError):
return '?'
raise
@@ -318,7 +318,7 @@
space = self.space
try:
return space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise StopIteration
@@ -406,7 +406,7 @@
self.sys.get('builtin_module_names')):
try:
w_mod = self.getitem(w_modules, w_modname)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
continue
raise
@@ -440,7 +440,7 @@
try:
self.call_method(w_mod, "_shutdown")
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self, "threading._shutdown()")
def __repr__(self):
@@ -476,7 +476,7 @@
assert reuse
try:
return self.getitem(w_modules, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_KeyError):
raise
@@ -763,7 +763,7 @@
def finditem(self, w_obj, w_key):
try:
return self.getitem(w_obj, w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
return None
raise
@@ -771,7 +771,7 @@
def findattr(self, w_object, w_name):
try:
return self.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
# a PyPy extension: let SystemExit and KeyboardInterrupt go through
if e.async(self):
raise
@@ -871,7 +871,7 @@
items=items)
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -895,7 +895,7 @@
while True:
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -940,7 +940,7 @@
"""
try:
return self.len_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -950,7 +950,7 @@
return default
try:
w_hint = self.get_and_call_function(w_descr, w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -1047,7 +1047,7 @@
else:
return False
return self.exception_issubclass_w(w_exc_type, w_check_class)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_TypeError): # string exceptions maybe
return False
raise
@@ -1165,7 +1165,7 @@
try:
self.getattr(w_obj, self.wrap("__call__"))
return self.w_True
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_AttributeError):
raise
return self.w_False
@@ -1285,7 +1285,7 @@
def _next_or_none(self, w_it):
try:
return self.next(w_it)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
return None
@@ -1361,7 +1361,7 @@
"""
try:
w_index = self.index(w_obj)
- except OperationError, err:
+ except OperationError as err:
if objdescr is None or not err.match(self, self.w_TypeError):
raise
raise oefmt(self.w_TypeError, "%s must be an integer, not %T",
@@ -1371,7 +1371,7 @@
# return type of __index__ is already checked by space.index(),
# but there is no reason to allow conversions anyway
index = self.int_w(w_index, allow_conversion=False)
- except OperationError, err:
+ except OperationError as err:
if not err.match(self, self.w_OverflowError):
raise
if not w_exception:
@@ -1519,7 +1519,7 @@
# the unicode buffer.)
try:
return self.str_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_TypeError):
raise
try:
@@ -1693,7 +1693,7 @@
# instead of raising OverflowError. For obscure cases only.
try:
return self.int_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import intmask
@@ -1704,7 +1704,7 @@
# instead of raising OverflowError.
try:
return self.r_longlong_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import longlongmask
@@ -1719,7 +1719,7 @@
not self.isinstance_w(w_fd, self.w_long)):
try:
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_AttributeError):
raise oefmt(self.w_TypeError,
"argument must be an int, or have a fileno() "
@@ -1732,7 +1732,7 @@
"fileno() returned a non-integer")
try:
fd = self.c_int_w(w_fd)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_OverflowError):
fd = -1
else:
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -563,7 +563,7 @@
while pending is not None:
try:
pending.callback(pending.w_obj)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(space, pending.descrname, pending.w_obj)
e.clear(space) # break up reference cycles
pending = pending.next
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -539,7 +539,7 @@
try:
return space.call_method(space.w_object, '__getattribute__',
space.wrap(self), w_attr)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# fall-back to the attribute of the underlying 'im_func'
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -686,7 +686,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -702,7 +702,7 @@
raise OperationError(space.w_KeyboardInterrupt, space.w_None)
except MemoryError:
raise OperationError(space.w_MemoryError, space.w_None)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
rstackovf.check_stack_overflow()
raise oefmt(space.w_RuntimeError,
"maximum recursion depth exceeded")
@@ -724,7 +724,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -745,7 +745,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args.prepend(w_obj))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -762,7 +762,7 @@
w_result = self.fastfunc_0(space)
except DescrMismatch:
raise oefmt(space.w_SystemError, "unexpected DescrMismatch error")
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -782,7 +782,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -802,7 +802,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -822,7 +822,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2, w3]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -843,7 +843,7 @@
self.descr_reqcls,
Arguments(space,
[w1, w2, w3, w4]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -144,7 +144,7 @@
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
space.w_None)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration) or \
e.match(space, space.w_GeneratorExit):
return space.w_None
@@ -196,7 +196,7 @@
results=results, pycode=pycode)
try:
w_result = frame.execute_frame(space.w_None)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py
--- a/pypy/interpreter/main.py
+++ b/pypy/interpreter/main.py
@@ -8,7 +8,7 @@
w_modules = space.sys.get('modules')
try:
return space.getitem(w_modules, w_main)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
mainmodule = module.Module(space, w_main)
@@ -52,7 +52,7 @@
else:
return
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.record_interpreter_traceback()
raise
@@ -110,7 +110,7 @@
try:
w_stdout = space.sys.get('stdout')
w_softspace = space.getattr(w_stdout, space.wrap('softspace'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# Don't crash if user defined stdout doesn't have softspace
@@ -118,7 +118,7 @@
if space.is_true(w_softspace):
space.call_method(w_stdout, 'write', space.wrap('\n'))
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.normalize_exception(space)
w_type = operationerr.w_type
w_value = operationerr.get_w_value(space)
@@ -162,7 +162,7 @@
space.call_function(w_hook, w_type, w_value, w_traceback)
return False # done
- except OperationError, err2:
+ except OperationError as err2:
# XXX should we go through sys.get('stderr') ?
print >> sys.stderr, 'Error calling sys.excepthook:'
err2.print_application_traceback(space)
diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py
--- a/pypy/interpreter/mixedmodule.py
+++ b/pypy/interpreter/mixedmodule.py
@@ -169,7 +169,7 @@
while 1:
try:
value = eval(spec, d)
- except NameError, ex:
+ except NameError as ex:
name = ex.args[0].split("'")[1] # super-Evil
if name in d:
raise # propagate the NameError
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -110,7 +110,7 @@
if code_hook is not None:
try:
self.space.call_function(code_hook, self)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self.space, "new_code_hook()")
def _initialize(self):
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -55,21 +55,21 @@
try:
code = self.compile(source, filename, mode, flags)
return code # success
- except OperationError, err:
+ except OperationError as err:
if not err.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n", filename, mode, flags)
return None # expect more
- except OperationError, err1:
+ except OperationError as err1:
if not err1.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n\n", filename, mode, flags)
raise # uh? no error with \n\n. re-raise the previous error
- except OperationError, err2:
+ except OperationError as err2:
if not err2.match(space, space.w_SyntaxError):
raise
@@ -130,7 +130,7 @@
try:
mod = optimize.optimize_ast(space, node, info)
code = codegen.compile_ast(space, mod, info)
- except parseerror.SyntaxError, e:
+ except parseerror.SyntaxError as e:
raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return code
@@ -143,9 +143,9 @@
try:
parse_tree = self.parser.parse_source(source, info)
mod = astbuilder.ast_from_node(space, parse_tree, info)
- except parseerror.IndentationError, e:
+ except parseerror.IndentationError as e:
raise OperationError(space.w_IndentationError, e.wrap_info(space))
- except parseerror.SyntaxError, e:
+ except parseerror.SyntaxError as e:
raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return mod
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -550,7 +550,7 @@
where the order is according to self.pycode.signature()."""
scope_len = len(scope_w)
if scope_len > self.pycode.co_nlocals:
- raise ValueError, "new fastscope is longer than the allocated area"
+ raise ValueError("new fastscope is longer than the allocated area")
# don't assign directly to 'locals_cells_stack_w[:scope_len]' to be
# virtualizable-friendly
for i in range(scope_len):
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -67,9 +67,9 @@
def handle_bytecode(self, co_code, next_instr, ec):
try:
next_instr = self.dispatch_bytecode(co_code, next_instr, ec)
- except OperationError, operr:
+ except OperationError as operr:
next_instr = self.handle_operation_error(ec, operr)
- except RaiseWithExplicitTraceback, e:
+ except RaiseWithExplicitTraceback as e:
next_instr = self.handle_operation_error(ec, e.operr,
attach_tb=False)
except KeyboardInterrupt:
@@ -78,7 +78,7 @@
except MemoryError:
next_instr = self.handle_asynchronous_error(ec,
self.space.w_MemoryError)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
# Note that this case catches AttributeError!
rstackovf.check_stack_overflow()
next_instr = self.handle_asynchronous_error(ec,
@@ -117,7 +117,7 @@
finally:
if trace is not None:
self.getorcreatedebug().w_f_trace = trace
- except OperationError, e:
+ except OperationError as e:
operr = e
pytraceback.record_application_traceback(
self.space, operr, self, self.last_instr)
@@ -844,7 +844,7 @@
w_varname = self.getname_w(varindex)
try:
self.space.delitem(self.getorcreatedebug().w_locals, w_varname)
- except OperationError, e:
+ except OperationError as e:
# catch KeyErrors and turn them into NameErrors
if not e.match(self.space, self.space.w_KeyError):
raise
@@ -1003,7 +1003,7 @@
try:
if space.int_w(w_flag) == -1:
w_flag = None
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
@@ -1040,7 +1040,7 @@
w_module = self.peekvalue()
try:
w_obj = self.space.getattr(w_module, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_AttributeError):
raise
raise oefmt(self.space.w_ImportError,
@@ -1099,7 +1099,7 @@
w_iterator = self.peekvalue()
try:
w_nextitem = self.space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_StopIteration):
raise
# iterator exhausted
@@ -1110,7 +1110,7 @@
return next_instr
def FOR_LOOP(self, oparg, next_instr):
- raise BytecodeCorruption, "old opcode, no longer in use"
+ raise BytecodeCorruption("old opcode, no longer in use")
def SETUP_LOOP(self, offsettoend, next_instr):
block = LoopBlock(self, next_instr + offsettoend, self.lastblock)
diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -118,7 +118,7 @@
if enc is not None and enc not in ('utf-8', 'iso-8859-1'):
try:
textsrc = recode_to_utf8(self.space, textsrc, enc)
- except OperationError, e:
+ except OperationError as e:
# if the codec is not found, LookupError is raised. we
# check using 'is_w' not to mask potential IndexError or
# KeyError
@@ -164,10 +164,10 @@
for tp, value, lineno, column, line in tokens:
if self.add_token(tp, value, lineno, column, line):
break
- except error.TokenError, e:
+ except error.TokenError as e:
e.filename = compile_info.filename
raise
- except parser.ParseError, e:
+ except parser.ParseError as e:
# Catch parse errors, pretty them up and reraise them as a
# SyntaxError.
new_err = error.IndentationError
diff --git a/pypy/interpreter/pyparser/test/unittest_samples.py b/pypy/interpreter/pyparser/test/unittest_samples.py
--- a/pypy/interpreter/pyparser/test/unittest_samples.py
+++ b/pypy/interpreter/pyparser/test/unittest_samples.py
@@ -66,7 +66,7 @@
print
try:
assert_tuples_equal(pypy_tuples, python_tuples)
- except AssertionError,e:
+ except AssertionError as e:
error_path = e.args[-1]
print "ERROR PATH =", error_path
print "="*80
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -224,7 +224,7 @@
def _spawn(self, *args, **kwds):
try:
import pexpect
- except ImportError, e:
+ except ImportError as e:
py.test.skip(str(e))
else:
# Version is of the style "0.999" or "2.1". Older versions of
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -618,14 +618,14 @@
space = self.space
try:
Arguments(space, [], w_stararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after * must be a sequence, not int"
else:
assert 0, "did not raise"
try:
Arguments(space, [], w_starstararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after ** must be a mapping, not int"
else:
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -696,7 +696,7 @@
""")
try:
self.compiler.compile(str(source), '', 'exec', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -706,7 +706,7 @@
code = 'def f(): (yield bar) += y'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -716,7 +716,7 @@
code = 'dict(a = i for i in xrange(10))'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -1011,7 +1011,7 @@
"""
try:
exec source
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unindent does not match any outer indentation level'
else:
raise Exception("DID NOT RAISE")
@@ -1021,13 +1021,13 @@
source2 = "x = (\n\n"
try:
exec source1
- except SyntaxError, err1:
+ except SyntaxError as err1:
pass
else:
raise Exception("DID NOT RAISE")
try:
exec source2
- except SyntaxError, err2:
+ except SyntaxError as err2:
pass
else:
raise Exception("DID NOT RAISE")
diff --git a/pypy/interpreter/test/test_exceptcomp.py b/pypy/interpreter/test/test_exceptcomp.py
--- a/pypy/interpreter/test/test_exceptcomp.py
+++ b/pypy/interpreter/test/test_exceptcomp.py
@@ -7,7 +7,7 @@
def test_exception(self):
try:
- raise TypeError, "nothing"
+ raise TypeError("nothing")
except TypeError:
pass
except:
@@ -15,7 +15,7 @@
def test_exceptionfail(self):
try:
- raise TypeError, "nothing"
+ raise TypeError("nothing")
except KeyError:
self.fail("Different exceptions match.")
except TypeError:
@@ -47,7 +47,7 @@
class UserExcept(Exception):
pass
try:
- raise UserExcept, "nothing"
+ raise UserExcept("nothing")
except UserExcept:
pass
except:
diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
--- a/pypy/interpreter/test/test_exec.py
+++ b/pypy/interpreter/test/test_exec.py
@@ -196,11 +196,11 @@
def test_filename(self):
try:
exec "'unmatched_quote"
- except SyntaxError, msg:
+ except SyntaxError as msg:
assert msg.filename == ''
try:
eval("'unmatched_quote")
- except SyntaxError, msg:
+ except SyntaxError as msg:
assert msg.filename == ''
def test_exec_and_name_lookups(self):
@@ -213,7 +213,7 @@
try:
res = f()
- except NameError, e: # keep py.test from exploding confused
+ except NameError as e: # keep py.test from exploding confused
raise e
assert res == 1
diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py
--- a/pypy/interpreter/test/test_function.py
+++ b/pypy/interpreter/test/test_function.py
@@ -296,14 +296,14 @@
def test_call_error_message(self):
try:
len()
- except TypeError, e:
+ except TypeError as e:
assert "len() takes exactly 1 argument (0 given)" in e.message
else:
assert 0, "did not raise"
try:
len(1, 2)
- except TypeError, e:
+ except TypeError as e:
assert "len() takes exactly 1 argument (2 given)" in e.message
else:
assert 0, "did not raise"
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -26,7 +26,7 @@
wrappedfunc = space.getitem(w_glob, w(functionname))
try:
w_output = space.call_function(wrappedfunc, *wrappedargs)
- except error.OperationError, e:
+ except error.OperationError as e:
#e.print_detailed_traceback(space)
return '<<<%s>>>' % e.errorstr(space)
else:
@@ -331,7 +331,7 @@
def f(): f()
try:
f()
- except RuntimeError, e:
+ except RuntimeError as e:
assert str(e) == "maximum recursion depth exceeded"
else:
assert 0, "should have raised!"
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -86,7 +86,7 @@
""")
try:
space.unpackiterable(w_a)
- except OperationError, o:
+ except OperationError as o:
if not o.match(space, space.w_ZeroDivisionError):
raise Exception("DID NOT RAISE")
else:
@@ -237,7 +237,7 @@
self.space.getindex_w, w_instance2, self.space.w_IndexError)
try:
self.space.getindex_w(self.space.w_tuple, None, "foobar")
- except OperationError, e:
+ except OperationError as e:
assert e.match(self.space, self.space.w_TypeError)
assert "foobar" in e.errorstr(self.space)
else:
diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py
--- a/pypy/interpreter/test/test_pyframe.py
+++ b/pypy/interpreter/test/test_pyframe.py
@@ -376,7 +376,7 @@
def g():
try:
raise Exception
- except Exception, e:
+ except Exception as e:
import sys
raise Exception, e, sys.exc_info()[2]
diff --git a/pypy/interpreter/test/test_raise.py b/pypy/interpreter/test/test_raise.py
--- a/pypy/interpreter/test/test_raise.py
+++ b/pypy/interpreter/test/test_raise.py
@@ -18,34 +18,34 @@
def test_1arg(self):
try:
raise SystemError, 1
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
def test_2args(self):
try:
raise SystemError, (1, 2)
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_instancearg(self):
try:
raise SystemError, SystemError(1, 2)
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_more_precise_instancearg(self):
try:
raise Exception, SystemError(1, 2)
- except SystemError, e:
+ except SystemError as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_builtin_exc(self):
try:
[][0]
- except IndexError, e:
+ except IndexError as e:
assert isinstance(e, IndexError)
def test_raise_cls(self):
@@ -194,7 +194,7 @@
raise Sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a.__class__ is Sub
sub = Sub()
@@ -202,14 +202,14 @@
raise sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a is sub
try:
raise A, sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a is sub
assert sub.val is None
@@ -217,13 +217,13 @@
raise Sub, 42
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a.__class__ is Sub
assert a.val == 42
try:
{}[5]
- except A, a:
+ except A as a:
assert 0
except KeyError:
pass
diff --git a/pypy/interpreter/test/test_syntax.py b/pypy/interpreter/test/test_syntax.py
--- a/pypy/interpreter/test/test_syntax.py
+++ b/pypy/interpreter/test/test_syntax.py
@@ -254,7 +254,7 @@
space.wrap(s),
space.wrap('?'),
space.wrap('exec'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_SyntaxError):
raise
else:
@@ -723,7 +723,7 @@
line4 = "if ?: pass\n"
try:
exec "print\nprint\nprint\n" + line4
- except SyntaxError, e:
+ except SyntaxError as e:
assert e.lineno == 4
assert e.text == line4
assert e.offset == e.text.index('?') + 1
@@ -738,7 +738,7 @@
a b c d e
bar
"""
- except SyntaxError, e:
+ except SyntaxError as e:
assert e.lineno == 4
assert e.text.endswith('a b c d e\n')
assert e.offset == e.text.index('b')
@@ -749,7 +749,7 @@
program = "(1, 2) += (3, 4)\n"
try:
exec program
- except SyntaxError, e:
+ except SyntaxError as e:
assert e.lineno == 1
assert e.text is None
else:
@@ -769,7 +769,7 @@
for s in VALID:
try:
compile(s, '?', 'exec')
- except Exception, e:
+ except Exception as e:
print '-'*20, 'FAILED TO COMPILE:', '-'*20
print s
print '%s: %s' % (e.__class__, e)
@@ -777,7 +777,7 @@
for s in INVALID:
try:
raises(SyntaxError, compile, s, '?', 'exec')
- except Exception ,e:
+ except Exception as e:
print '-'*20, 'UNEXPECTEDLY COMPILED:', '-'*20
print s
print '%s: %s' % (e.__class__, e)
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -13,7 +13,7 @@
# XXX why is this called newstring?
import sys
def f():
- raise TypeError, "hello"
+ raise TypeError("hello")
def g():
f()
@@ -23,7 +23,7 @@
except:
typ,val,tb = sys.exc_info()
else:
- raise AssertionError, "should have raised"
+ raise AssertionError("should have raised")
assert hasattr(tb, 'tb_frame')
assert hasattr(tb, 'tb_lasti')
assert hasattr(tb, 'tb_lineno')
diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py
--- a/pypy/interpreter/test/test_zzpickle_and_slow.py
+++ b/pypy/interpreter/test/test_zzpickle_and_slow.py
@@ -520,7 +520,7 @@
def f(): yield 42
f().__reduce__()
""")
- except TypeError, e:
+ except TypeError as e:
if 'pickle generator' not in str(e):
raise
py.test.skip("Frames can't be __reduce__()-ed")
diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py
--- a/pypy/module/__builtin__/__init__.py
+++ b/pypy/module/__builtin__/__init__.py
@@ -102,7 +102,7 @@
space = self.space
try:
w_builtin = space.getitem(w_globals, space.wrap('__builtins__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
else:
diff --git a/pypy/module/__builtin__/abstractinst.py b/pypy/module/__builtin__/abstractinst.py
--- a/pypy/module/__builtin__/abstractinst.py
+++ b/pypy/module/__builtin__/abstractinst.py
@@ -21,7 +21,7 @@
"""
try:
w_bases = space.getattr(w_cls, space.wrap('__bases__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise # propagate other errors
return None
@@ -41,7 +41,7 @@
def abstract_getclass(space, w_obj):
try:
return space.getattr(w_obj, space.wrap('__class__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise # propagate other errors
return space.type(w_obj)
@@ -63,7 +63,7 @@
w_result = space.isinstance_allow_override(w_obj, w_klass_or_tuple)
else:
w_result = space.isinstance(w_obj, w_klass_or_tuple)
- except OperationError, e: # if w_klass_or_tuple was not a type, ignore it
+ except OperationError as e: # if w_klass_or_tuple was not a type, ignore it
if not e.match(space, space.w_TypeError):
raise # propagate other errors
else:
@@ -81,7 +81,7 @@
w_klass_or_tuple)
else:
w_result = space.issubtype(w_pretendtype, w_klass_or_tuple)
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
return False # ignore most exceptions
@@ -102,7 +102,7 @@
" or tuple of classes and types")
try:
w_abstractclass = space.getattr(w_obj, space.wrap('__class__'))
- except OperationError, e:
+ except OperationError as e:
if e.async(space): # ignore most exceptions
raise
return False
@@ -142,7 +142,7 @@
w_klass_or_tuple)
else:
w_result = space.issubtype(w_derived, w_klass_or_tuple)
- except OperationError, e: # if one of the args was not a type, ignore it
+ except OperationError as e: # if one of the args was not a type, ignore it
if not e.match(space, space.w_TypeError):
raise # propagate other errors
else:
diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py
--- a/pypy/module/__builtin__/descriptor.py
+++ b/pypy/module/__builtin__/descriptor.py
@@ -62,7 +62,7 @@
else:
try:
w_type = space.getattr(w_obj_or_type, space.wrap('__class__'))
- except OperationError, o:
+ except OperationError as o:
if not o.match(space, space.w_AttributeError):
raise
w_type = w_objtype
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -78,7 +78,7 @@
start = space.int_w(w_start)
stop = space.int_w(w_stop)
step = space.int_w(w_step)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_OverflowError):
raise
return range_with_longs(space, w_start, w_stop, w_step)
@@ -175,7 +175,7 @@
jitdriver.jit_merge_point(has_key=has_key, has_item=has_item, w_type=w_type)
try:
w_item = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
@@ -353,7 +353,7 @@
w_index = space.wrap(self.remaining)
try:
w_item = space.getitem(self.w_sequence, w_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
else:
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -145,7 +145,7 @@
"cannot delete attribute '%s'", name)
try:
space.delitem(self.w_dict, w_attr)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
raise oefmt(space.w_AttributeError,
@@ -165,7 +165,7 @@
def get_module_string(self, space):
try:
w_mod = self.descr_getattribute(space, "__module__")
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
return "?"
@@ -230,7 +230,7 @@
def binaryop(self, space, w_other):
try:
w_meth = self.getattr(space, name, False)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
return space.w_NotImplemented
raise
@@ -278,7 +278,7 @@
def _coerce_helper(space, w_self, w_other):
try:
w_tup = space.coerce(w_self, w_other)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
return [w_self, w_other]
@@ -336,7 +336,7 @@
if w_meth is not None:
try:
return space.call_function(w_meth, space.wrap(name))
- except OperationError, e:
+ except OperationError as e:
if not exc and e.match(space, space.w_AttributeError):
return None # eat the AttributeError
raise
@@ -519,7 +519,7 @@
return w_res
try:
res = space.int_w(w_res)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"__cmp__ must return int")
@@ -537,7 +537,7 @@
return w_res
try:
res = space.int_w(w_res)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"__cmp__ must return int")
@@ -599,7 +599,7 @@
while 1:
try:
w_x = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
return space.w_False
raise
diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py
--- a/pypy/module/__builtin__/operation.py
+++ b/pypy/module/__builtin__/operation.py
@@ -62,7 +62,7 @@
w_name = checkattrname(space, w_name)
try:
return space.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
if w_defvalue is not None:
if e.match(space, space.w_AttributeError):
return w_defvalue
@@ -190,7 +190,7 @@
is exhausted, it is returned instead of raising StopIteration."""
try:
return space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if w_default is not None and e.match(space, space.w_StopIteration):
return w_default
raise
diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py
--- a/pypy/module/__builtin__/test/test_classobj.py
+++ b/pypy/module/__builtin__/test/test_classobj.py
@@ -688,7 +688,7 @@
def test_catch_attributeerror_of_descriptor(self):
def booh(self):
- raise this_exception, "booh"
+ raise this_exception("booh")
class E:
__eq__ = property(booh)
diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py
--- a/pypy/module/__builtin__/test/test_descriptor.py
+++ b/pypy/module/__builtin__/test/test_descriptor.py
@@ -93,7 +93,7 @@
def test_super_fail(self):
try:
super(list, 2)
- except TypeError, e:
+ except TypeError as e:
message = e.args[0]
assert message.startswith('super(type, obj): obj must be an instance or subtype of type')
@@ -303,7 +303,7 @@
for attr in "__doc__", "fget", "fset", "fdel":
try:
setattr(raw, attr, 42)
- except TypeError, msg:
+ except TypeError as msg:
if str(msg).find('readonly') < 0:
raise Exception("when setting readonly attr %r on a "
"property, got unexpected TypeError "
@@ -322,7 +322,7 @@
except ZeroDivisionError:
pass
else:
- raise Exception, "expected ZeroDivisionError from bad property"
+ raise Exception("expected ZeroDivisionError from bad property")
def test_property_subclass(self):
class P(property):
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -105,7 +105,7 @@
def validate_fd(space, fd):
try:
rposix.validate_fd(fd)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
def get_console_cp(space):
diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py
--- a/pypy/module/__pypy__/test/test_signal.py
+++ b/pypy/module/__pypy__/test/test_signal.py
@@ -35,7 +35,7 @@
for i in range(10):
print('x')
time.sleep(0.25)
- except BaseException, e:
+ except BaseException as e:
interrupted.append(e)
finally:
print('subthread stops, interrupted=%r' % (interrupted,))
@@ -120,7 +120,7 @@
time.sleep(0.5)
with __pypy__.thread.signals_enabled:
thread.interrupt_main()
- except BaseException, e:
+ except BaseException as e:
interrupted.append(e)
finally:
lock.release()
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -112,7 +112,7 @@
must_leave = space.threadlocals.try_enter_thread(space)
self.py_invoke(ll_res, ll_args)
#
- except Exception, e:
+ except Exception as e:
# oups! last-level attempt to recover.
try:
os.write(STDERR, "SystemError: callback raised ")
@@ -142,7 +142,7 @@
w_res = space.call(self.w_callable, w_args)
extra_line = "Trying to convert the result back to C:\n"
self.convert_result(ll_res, w_res)
- except OperationError, e:
+ except OperationError as e:
self.handle_applevel_exception(e, ll_res, extra_line)
@jit.unroll_safe
@@ -187,7 +187,7 @@
w_res = space.call_function(self.w_onerror, w_t, w_v, w_tb)
if not space.is_none(w_res):
self.convert_result(ll_res, w_res)
- except OperationError, e2:
+ except OperationError as e2:
# double exception! print a double-traceback...
self.print_error(e, extra_line) # original traceback
e2.write_unraisable(space, '', with_traceback=True,
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -244,7 +244,7 @@
for i in range(length):
try:
w_item = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise oefmt(space.w_ValueError,
@@ -253,7 +253,7 @@
target = rffi.ptradd(target, ctitemsize)
try:
space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
else:
diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py
--- a/pypy/module/_cffi_backend/cdlopen.py
+++ b/pypy/module/_cffi_backend/cdlopen.py
@@ -21,7 +21,7 @@
filename = ""
try:
handle = dlopen(ll_libname, flags)
- except DLOpenError, e:
+ except DLOpenError as e:
raise wrap_dlopenerror(ffi.space, e, filename)
W_LibObject.__init__(self, ffi, filename)
self.libhandle = handle
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -50,7 +50,7 @@
builder = CifDescrBuilder(fargs, fresult, abi)
try:
builder.rawallocate(self)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_NotImplementedError):
raise
# else, eat the NotImplementedError. We will get the
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -177,12 +177,12 @@
space = self.space
try:
fieldname = space.str_w(w_field_or_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
try:
index = space.int_w(w_field_or_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
raise oefmt(space.w_TypeError,
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -378,6 +378,6 @@
raise oefmt(space.w_ValueError, "file has no OS file descriptor")
try:
w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf)
diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py
--- a/pypy/module/_cffi_backend/embedding.py
+++ b/pypy/module/_cffi_backend/embedding.py
@@ -79,7 +79,7 @@
patch_sys(space)
load_embedded_cffi_module(space, version, init_struct)
res = 0
- except OperationError, operr:
+ except OperationError as operr:
operr.write_unraisable(space, "initialization of '%s'" % name,
with_traceback=True)
space.appexec([], r"""():
@@ -91,7 +91,7 @@
res = -1
if must_leave:
space.threadlocals.leave_thread(space)
- except Exception, e:
+ except Exception as e:
# oups! last-level attempt to recover.
try:
os.write(STDERR, "From initialization of '")
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -108,7 +108,7 @@
# w.r.t. buffers and memoryviews??
try:
buf = space.readbuf_w(w_x)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
buf = space.buffer_w(w_x, space.BUF_SIMPLE)
@@ -117,7 +117,7 @@
def _fetch_as_write_buffer(space, w_x):
try:
buf = space.writebuf_w(w_x)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
buf = space.buffer_w(w_x, space.BUF_WRITABLE)
diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py
--- a/pypy/module/_cffi_backend/lib_obj.py
+++ b/pypy/module/_cffi_backend/lib_obj.py
@@ -39,7 +39,7 @@
mod = __import__(modname, None, None, ['ffi', 'lib'])
return mod.lib""")
lib1 = space.interp_w(W_LibObject, w_lib1)
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
raise oefmt(space.w_ImportError,
diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py
--- a/pypy/module/_cffi_backend/libraryobj.py
+++ b/pypy/module/_cffi_backend/libraryobj.py
@@ -24,7 +24,7 @@
filename = ""
try:
self.handle = dlopen(ll_libname, flags)
- except DLOpenError, e:
+ except DLOpenError as e:
raise wrap_dlopenerror(space, e, filename)
self.name = filename
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -132,7 +132,7 @@
return space.int_w(w_ob)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if _is_a_float(space, w_ob):
@@ -149,7 +149,7 @@
return space.int_w(w_ob)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if _is_a_float(space, w_ob):
@@ -172,7 +172,7 @@
return r_ulonglong(value)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if strict and _is_a_float(space, w_ob):
@@ -197,7 +197,7 @@
return r_uint(value)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if strict and _is_a_float(space, w_ob):
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -171,7 +171,7 @@
w_start = space.getattr(w_exc, space.wrap('start'))
w_end = space.getattr(w_exc, space.wrap('end'))
w_obj = space.getattr(w_exc, space.wrap('object'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
raise oefmt(space.w_TypeError, "wrong exception")
@@ -523,7 +523,7 @@
else:
try:
w_ch = space.getitem(self.w_mapping, space.newint(ord(ch)))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_LookupError):
raise
return errorchar
@@ -556,7 +556,7 @@
# get the character from the mapping
try:
w_ch = space.getitem(self.w_mapping, space.newint(ord(ch)))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_LookupError):
raise
return errorchar
@@ -635,7 +635,7 @@
space = self.space
try:
w_code = space.call_function(self.w_getcode, space.wrap(name))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
return -1
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
--- a/pypy/module/_codecs/test/test_codecs.py
+++ b/pypy/module/_codecs/test/test_codecs.py
@@ -458,7 +458,7 @@
if sys.maxunicode > 0xffff:
try:
"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
- except UnicodeDecodeError, ex:
+ except UnicodeDecodeError as ex:
assert "unicode_internal" == ex.encoding
assert "\x00\x00\x00\x00\x00\x11\x11\x00" == ex.object
assert ex.start == 4
@@ -650,7 +650,7 @@
def test_utf7_start_end_in_exception(self):
try:
'+IC'.decode('utf-7')
- except UnicodeDecodeError, exc:
+ except UnicodeDecodeError as exc:
assert exc.start == 0
assert exc.end == 3
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -168,7 +168,7 @@
while True:
try:
w_obj = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
@@ -190,7 +190,7 @@
while True:
try:
w_obj = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py
--- a/pypy/module/_collections/test/test_defaultdict.py
+++ b/pypy/module/_collections/test/test_defaultdict.py
@@ -26,7 +26,7 @@
for key in ['foo', (1,)]:
try:
d1[key]
- except KeyError, err:
+ except KeyError as err:
assert err.args[0] == key
else:
assert 0, "expected KeyError"
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py
--- a/pypy/module/_continuation/interp_continuation.py
+++ b/pypy/module/_continuation/interp_continuation.py
@@ -224,7 +224,7 @@
try:
frame = self.bottomframe
w_result = frame.execute_frame()
- except Exception, e:
+ except Exception as e:
global_state.propagate_exception = e
else:
global_state.w_value = w_result
diff --git a/pypy/module/_continuation/interp_pickle.py b/pypy/module/_continuation/interp_pickle.py
--- a/pypy/module/_continuation/interp_pickle.py
+++ b/pypy/module/_continuation/interp_pickle.py
@@ -69,7 +69,7 @@
try:
w_result = post_switch(sthread, h)
operr = None
- except OperationError, e:
+ except OperationError as e:
w_result = None
operr = e
#
@@ -88,7 +88,7 @@
try:
w_result = frame.execute_frame(w_result, operr)
operr = None
- except OperationError, e:
+ except OperationError as e:
w_result = None
operr = e
if exit_continulet is not None:
@@ -97,7 +97,7 @@
sthread.ec.topframeref = jit.vref_None
if operr:
raise operr
- except Exception, e:
+ except Exception as e:
global_state.propagate_exception = e
else:
global_state.w_value = w_result
diff --git a/pypy/module/_continuation/test/support.py b/pypy/module/_continuation/test/support.py
--- a/pypy/module/_continuation/test/support.py
+++ b/pypy/module/_continuation/test/support.py
@@ -8,6 +8,6 @@
def setup_class(cls):
try:
import rpython.rlib.rstacklet
- except CompilationError, e:
+ except CompilationError as e:
py.test.skip("cannot import rstacklet: %s" % e)
diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py
--- a/pypy/module/_continuation/test/test_stacklet.py
+++ b/pypy/module/_continuation/test/test_stacklet.py
@@ -553,11 +553,11 @@
res = "got keyerror"
try:
c1.switch(res)
- except IndexError, e:
+ except IndexError as e:
pass
try:
c1.switch(e)
- except IndexError, e2:
+ except IndexError as e2:
pass
try:
c1.switch(e2)
diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py
--- a/pypy/module/_csv/interp_reader.py
+++ b/pypy/module/_csv/interp_reader.py
@@ -65,7 +65,7 @@
while True:
try:
w_line = space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
if (field_builder is not None and
state != START_RECORD and state != EAT_CRNL and
diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py
--- a/pypy/module/_csv/interp_writer.py
+++ b/pypy/module/_csv/interp_writer.py
@@ -49,7 +49,7 @@
try:
space.float_w(w_field) # is it an int/long/float?
quoted = False
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
quoted = True
@@ -124,7 +124,7 @@
while True:
try:
w_seq = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -56,7 +56,7 @@
assert isinstance(self, W_File)
try:
self.direct_close()
- except StreamErrors, e:
+ except StreamErrors as e:
operr = wrap_streamerror(self.space, e, self.w_name)
raise operr
@@ -200,7 +200,7 @@
while n > 0:
try:
data = stream.read(n)
- except OSError, e:
+ except OSError as e:
# a special-case only for read() (similar to CPython, which
# also loses partial data with other methods): if we get
# EAGAIN after already some data was received, return it.
diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py
--- a/pypy/module/_file/interp_stream.py
+++ b/pypy/module/_file/interp_stream.py
@@ -81,7 +81,7 @@
"""
try:
return self.stream.read(n)
- except StreamErrors, e:
+ except StreamErrors as e:
raise wrap_streamerror(self.space, e)
def do_write(self, data):
@@ -92,7 +92,7 @@
"""
try:
self.stream.write(data)
- except StreamErrors, e:
+ except StreamErrors as e:
raise wrap_streamerror(self.space, e)
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py
--- a/pypy/module/_file/test/test_file.py
+++ b/pypy/module/_file/test/test_file.py
@@ -151,7 +151,7 @@
def test_oserror_has_filename(self):
try:
f = self.file("file that is clearly not there")
- except IOError, e:
+ except IOError as e:
assert e.filename == 'file that is clearly not there'
else:
raise Exception("did not raise")
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -28,7 +28,7 @@
space = global_name_fetcher.space
w_name = space.wrap(rffi.charp2str(obj_name[0].c_name))
global_name_fetcher.meth_names.append(w_name)
- except OperationError, e:
+ except OperationError as e:
global_name_fetcher.w_error = e
class NameFetcher:
diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py
--- a/pypy/module/_hashlib/test/test_hashlib.py
+++ b/pypy/module/_hashlib/test/test_hashlib.py
@@ -99,7 +99,7 @@
for hash_name, expected in sorted(expected_results.items()):
try:
m = _hashlib.new(hash_name)
- except ValueError, e:
+ except ValueError as e:
print 'skipped %s: %s' % (hash_name, e)
continue
m.update(test_string)
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -220,7 +220,7 @@
typename = space.type(self).name
try:
w_name = space.getattr(self, space.wrap("name"))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_Exception):
raise
return space.wrap("<%s>" % (typename,))
@@ -347,7 +347,7 @@
while True:
try:
w_written = space.call_method(self.w_raw, "write", w_data)
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue # try again
raise
@@ -521,7 +521,7 @@
while True:
try:
w_size = space.call_method(self.w_raw, "readinto", w_buf)
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue # try again
From pypy.commits at gmail.com Mon May 2 20:01:00 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 17:01:00 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Don't use deprecated except clause syntax
(pypy/)
Message-ID: <5727ea3c.de361c0a.b05f.63fb@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r84151:9b0fffe1e09b
Date: 2016-05-02 21:09 +0100
http://bitbucket.org/pypy/pypy/changeset/9b0fffe1e09b/
Log: Don't use deprecated except clause syntax (pypy/)
diff too long, truncating to 2000 out of 4882 lines
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -70,7 +70,7 @@
## from pypy.interpreter import main, interactive, error
## con = interactive.PyPyConsole(space)
## con.interact()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space).encode('utf-8'))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -78,7 +78,7 @@
finally:
try:
space.finish()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space).encode('utf-8'))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -122,7 +122,7 @@
space.wrap('__import__'))
space.call_function(import_, space.wrap('site'))
return rffi.cast(rffi.INT, 0)
- except OperationError, e:
+ except OperationError as e:
if verbose:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space).encode('utf-8'))
@@ -174,7 +174,7 @@
sys._pypy_execute_source.append(glob)
exec(stmt, glob)
""")
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space).encode('utf-8'))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -87,7 +87,7 @@
space = self.space
try:
args_w = space.fixedview(w_stararg)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"argument after * must be a sequence, not %T",
@@ -114,7 +114,7 @@
else:
try:
w_keys = space.call_method(w_starstararg, "keys")
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
raise oefmt(space.w_TypeError,
"argument after ** must be a mapping, not %T",
@@ -311,7 +311,7 @@
self._match_signature(w_firstarg,
scope_w, signature, defaults_w,
w_kw_defs, 0)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %8", fnname, e.getmsg())
return signature.scope_length()
@@ -335,7 +335,7 @@
try:
return self._parse(w_firstarg, signature, defaults_w, w_kw_defs,
blindargs)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %8", fnname, e.getmsg())
@staticmethod
@@ -386,7 +386,7 @@
for w_key in keys_w:
try:
key = space.identifier_w(w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise OperationError(
space.w_TypeError,
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -114,7 +114,7 @@
def check_forbidden_name(self, name, node):
try:
misc.check_forbidden_name(name)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error("cannot assign to %s" % (e.name,), node)
def new_identifier(self, name):
@@ -124,9 +124,9 @@
"""Set the context of an expression to Store or Del if possible."""
try:
expr.set_context(ctx)
- except ast.UnacceptableExpressionContext, e:
+ except ast.UnacceptableExpressionContext as e:
self.error_ast(e.msg, e.node)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_del_stmt(self, del_node):
@@ -1124,7 +1124,7 @@
return self.space.call_function(tp, w_num_str)
try:
return self.space.call_function(self.space.w_int, w_num_str, w_base)
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(self.space, self.space.w_ValueError):
raise
return self.space.call_function(self.space.w_float, w_num_str)
@@ -1144,7 +1144,7 @@
parsestring.parsestr(
space, encoding, atom_node.get_child(i).get_value())
for i in range(atom_node.num_children())]
- except error.OperationError, e:
+ except error.OperationError as e:
if not (e.match(space, space.w_UnicodeError) or
e.match(space, space.w_ValueError)):
raise
@@ -1156,7 +1156,7 @@
for i in range(1, len(sub_strings_w)):
try:
w_string = space.add(w_string, sub_strings_w[i])
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(space, space.w_TypeError):
raise
self.error("cannot mix bytes and nonbytes literals",
diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py
--- a/pypy/interpreter/astcompiler/symtable.py
+++ b/pypy/interpreter/astcompiler/symtable.py
@@ -320,7 +320,7 @@
try:
module.walkabout(self)
top.finalize(None, {}, {})
- except SyntaxError, e:
+ except SyntaxError as e:
e.filename = compile_info.filename
raise
self.pop_scope()
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -758,7 +758,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
@@ -770,7 +770,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -369,7 +369,7 @@
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
- except ASDLSyntaxError, err:
+ except ASDLSyntaxError as err:
print err
lines = buf.split("\n")
print lines[err.lineno - 1] # lines starts at 0, files at 1
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -76,7 +76,7 @@
try:
space.delitem(w_dict, space.wrap(attr))
return True
- except OperationError, ex:
+ except OperationError as ex:
if not ex.match(space, space.w_KeyError):
raise
return False
@@ -101,7 +101,7 @@
def getname(self, space):
try:
return space.unicode_w(space.getattr(self, space.wrap('__name__')))
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError):
return u'?'
raise
@@ -325,7 +325,7 @@
space = self.space
try:
return space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise StopIteration
@@ -413,7 +413,7 @@
self.sys.get('builtin_module_names')):
try:
w_mod = self.getitem(w_modules, w_modname)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
continue
raise
@@ -444,7 +444,7 @@
try:
self.call_method(w_mod, "_shutdown")
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self, "threading._shutdown()")
def __repr__(self):
@@ -481,7 +481,7 @@
assert reuse
try:
return self.getitem(w_modules, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_KeyError):
raise
@@ -623,7 +623,7 @@
while True:
try:
w_name = self.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break
@@ -784,7 +784,7 @@
def finditem(self, w_obj, w_key):
try:
return self.getitem(w_obj, w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
return None
raise
@@ -792,7 +792,7 @@
def findattr(self, w_object, w_name):
try:
return self.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
# a PyPy extension: let SystemExit and KeyboardInterrupt go through
if e.async(self):
raise
@@ -896,7 +896,7 @@
items=items)
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -920,7 +920,7 @@
while True:
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -967,7 +967,7 @@
"""
try:
return self.len_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -977,7 +977,7 @@
return default
try:
w_hint = self.get_and_call_function(w_descr, w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -1213,7 +1213,7 @@
def isabstractmethod_w(self, w_obj):
try:
w_result = self.getattr(w_obj, self.wrap("__isabstractmethod__"))
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_AttributeError):
return False
raise
@@ -1286,7 +1286,7 @@
def _next_or_none(self, w_it):
try:
return self.next(w_it)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
return None
@@ -1364,7 +1364,7 @@
"""
try:
w_index = self.index(w_obj)
- except OperationError, err:
+ except OperationError as err:
if objdescr is None or not err.match(self, self.w_TypeError):
raise
raise oefmt(self.w_TypeError, "%s must be an integer, not %T",
@@ -1374,7 +1374,7 @@
# return type of __index__ is already checked by space.index(),
# but there is no reason to allow conversions anyway
index = self.int_w(w_index, allow_conversion=False)
- except OperationError, err:
+ except OperationError as err:
if not err.match(self, self.w_OverflowError):
raise
if not w_exception:
@@ -1541,7 +1541,7 @@
# the unicode buffer.)
try:
return self.bytes_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_TypeError):
raise
return self.buffer_w(w_obj, flags).as_str()
@@ -1753,7 +1753,7 @@
# instead of raising OverflowError. For obscure cases only.
try:
return self.int_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import intmask
@@ -1764,7 +1764,7 @@
# instead of raising OverflowError.
try:
return self.r_longlong_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import longlongmask
@@ -1778,7 +1778,7 @@
if not self.isinstance_w(w_fd, self.w_int):
try:
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_AttributeError):
raise OperationError(self.w_TypeError,
self.wrap("argument must be an int, or have a fileno() "
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -562,7 +562,7 @@
while pending is not None:
try:
pending.callback(pending.w_obj)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(space, pending.descrname, pending.w_obj)
e.clear(space) # break up reference cycles
pending = pending.next
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -412,7 +412,7 @@
def fset_func_qualname(self, space, w_name):
try:
self.qualname = space.unicode_w(w_name)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise OperationError(space.w_TypeError,
space.wrap("__qualname__ must be set "
@@ -536,7 +536,7 @@
try:
return space.call_method(space.w_object, '__getattribute__',
space.wrap(self), w_attr)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# fall-back to the attribute of the underlying 'im_func'
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -702,7 +702,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -719,7 +719,7 @@
space.w_None)
except MemoryError:
raise OperationError(space.w_MemoryError, space.w_None)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
rstackovf.check_stack_overflow()
raise OperationError(space.w_RuntimeError,
space.wrap("maximum recursion depth exceeded"))
@@ -741,7 +741,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -762,7 +762,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args.prepend(w_obj))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -780,7 +780,7 @@
except DescrMismatch:
raise OperationError(space.w_SystemError,
space.wrap("unexpected DescrMismatch error"))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -800,7 +800,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -820,7 +820,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -840,7 +840,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2, w3]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -861,7 +861,7 @@
self.descr_reqcls,
Arguments(space,
[w1, w2, w3, w4]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -219,7 +219,7 @@
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
space.w_None)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration) or \
e.match(space, space.w_GeneratorExit):
return space.w_None
@@ -272,7 +272,7 @@
results=results, pycode=pycode)
try:
w_result = frame.execute_frame(space.w_None)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py
--- a/pypy/interpreter/main.py
+++ b/pypy/interpreter/main.py
@@ -8,7 +8,7 @@
w_modules = space.sys.get('modules')
try:
return space.getitem(w_modules, w_main)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
mainmodule = module.Module(space, w_main)
@@ -54,7 +54,7 @@
else:
return
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.record_interpreter_traceback()
raise
@@ -106,7 +106,7 @@
try:
# run it
f()
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.normalize_exception(space)
w_type = operationerr.w_type
w_value = operationerr.get_w_value(space)
@@ -150,7 +150,7 @@
space.call_function(w_hook, w_type, w_value, w_traceback)
return False # done
- except OperationError, err2:
+ except OperationError as err2:
# XXX should we go through sys.get('stderr') ?
print >> sys.stderr, 'Error calling sys.excepthook:'
err2.print_application_traceback(space)
diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py
--- a/pypy/interpreter/mixedmodule.py
+++ b/pypy/interpreter/mixedmodule.py
@@ -190,7 +190,7 @@
while 1:
try:
value = eval(spec, d)
- except NameError, ex:
+ except NameError as ex:
name = ex.args[0].split("'")[1] # super-Evil
if name in d:
raise # propagate the NameError
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -134,7 +134,7 @@
if code_hook is not None:
try:
self.space.call_function(code_hook, self)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self.space, "new_code_hook()")
def _initialize(self):
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -55,21 +55,21 @@
try:
code = self.compile(source, filename, mode, flags)
return code # success
- except OperationError, err:
+ except OperationError as err:
if not err.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n", filename, mode, flags)
return None # expect more
- except OperationError, err1:
+ except OperationError as err1:
if not err1.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n\n", filename, mode, flags)
raise # uh? no error with \n\n. re-raise the previous error
- except OperationError, err2:
+ except OperationError as err2:
if not err2.match(space, space.w_SyntaxError):
raise
@@ -132,7 +132,7 @@
try:
mod = optimize.optimize_ast(space, node, info)
code = codegen.compile_ast(space, mod, info)
- except parseerror.SyntaxError, e:
+ except parseerror.SyntaxError as e:
raise OperationError(space.w_SyntaxError,
e.wrap_info(space))
return code
@@ -153,13 +153,13 @@
try:
parse_tree = self.parser.parse_source(source, info)
mod = astbuilder.ast_from_node(space, parse_tree, info)
- except parseerror.TabError, e:
+ except parseerror.TabError as e:
raise OperationError(space.w_TabError,
e.wrap_info(space))
- except parseerror.IndentationError, e:
+ except parseerror.IndentationError as e:
raise OperationError(space.w_IndentationError,
e.wrap_info(space))
- except parseerror.SyntaxError, e:
+ except parseerror.SyntaxError as e:
raise OperationError(space.w_SyntaxError,
e.wrap_info(space))
return mod
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -72,10 +72,10 @@
def handle_bytecode(self, co_code, next_instr, ec):
try:
next_instr = self.dispatch_bytecode(co_code, next_instr, ec)
- except OperationError, operr:
+ except OperationError as operr:
operr.record_context(self.space, self)
next_instr = self.handle_operation_error(ec, operr)
- except RaiseWithExplicitTraceback, e:
+ except RaiseWithExplicitTraceback as e:
next_instr = self.handle_operation_error(ec, e.operr,
attach_tb=False)
except KeyboardInterrupt:
@@ -84,7 +84,7 @@
except MemoryError:
next_instr = self.handle_asynchronous_error(ec,
self.space.w_MemoryError)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
# Note that this case catches AttributeError!
rstackovf.check_stack_overflow()
next_instr = self.handle_asynchronous_error(ec,
@@ -123,7 +123,7 @@
finally:
if trace is not None:
self.getorcreatedebug().w_f_trace = trace
- except OperationError, e:
+ except OperationError as e:
operr = e
pytraceback.record_application_traceback(
self.space, operr, self, self.last_instr)
@@ -768,7 +768,7 @@
w_varname = self.getname_w(varindex)
try:
self.space.delitem(self.getorcreatedebug().w_locals, w_varname)
- except OperationError, e:
+ except OperationError as e:
# catch KeyErrors and turn them into NameErrors
if not e.match(self.space, self.space.w_KeyError):
raise
@@ -964,7 +964,7 @@
try:
if space.int_w(w_flag) == -1:
w_flag = None
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
@@ -1000,7 +1000,7 @@
w_module = self.peekvalue()
try:
w_obj = self.space.getattr(w_module, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_AttributeError):
raise
raise oefmt(self.space.w_ImportError,
@@ -1087,7 +1087,7 @@
w_iterator = self.peekvalue()
try:
w_nextitem = self.space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_StopIteration):
raise
# iterator exhausted
diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -129,7 +129,7 @@
enc = 'utf-8'
try:
textsrc = recode_to_utf8(self.space, bytessrc, enc)
- except OperationError, e:
+ except OperationError as e:
# if the codec is not found, LookupError is raised. we
# check using 'is_w' not to mask potential IndexError or
# KeyError
@@ -191,10 +191,10 @@
raise new_err(msg, lineno, column,
line, compile_info.filename)
- except error.TokenError, e:
+ except error.TokenError as e:
e.filename = compile_info.filename
raise
- except parser.ParseError, e:
+ except parser.ParseError as e:
# Catch parse errors, pretty them up and reraise them as a
# SyntaxError.
new_err = error.IndentationError
diff --git a/pypy/interpreter/pyparser/test/unittest_samples.py b/pypy/interpreter/pyparser/test/unittest_samples.py
--- a/pypy/interpreter/pyparser/test/unittest_samples.py
+++ b/pypy/interpreter/pyparser/test/unittest_samples.py
@@ -66,7 +66,7 @@
print
try:
assert_tuples_equal(pypy_tuples, python_tuples)
- except AssertionError,e:
+ except AssertionError as e:
error_path = e.args[-1]
print "ERROR PATH =", error_path
print "="*80
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -214,7 +214,7 @@
def _spawn(self, *args, **kwds):
try:
import pexpect
- except ImportError, e:
+ except ImportError as e:
py.test.skip(str(e))
else:
# Version is of the style "0.999" or "2.1". Older versions of
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -628,14 +628,14 @@
space = self.space
try:
Arguments(space, [], w_stararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after * must be a sequence, not int"
else:
assert 0, "did not raise"
try:
Arguments(space, [], w_starstararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after ** must be a mapping, not int"
else:
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -700,7 +700,7 @@
""")
try:
self.compiler.compile(str(source), '', 'exec', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -710,7 +710,7 @@
code = 'def f(): (yield bar) += y'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -720,7 +720,7 @@
code = 'dict(a = i for i in xrange(10))'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -27,7 +27,7 @@
wrappedfunc = space.getitem(w_glob, w(functionname))
try:
w_output = space.call(wrappedfunc, wrappedargs, wrappedkwargs)
- except error.OperationError, e:
+ except error.OperationError as e:
#e.print_detailed_traceback(space)
return '<<<%s>>>' % e.errorstr(space)
else:
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -87,7 +87,7 @@
""")
try:
space.unpackiterable(w_a)
- except OperationError, o:
+ except OperationError as o:
if not o.match(space, space.w_ZeroDivisionError):
raise
else:
@@ -237,7 +237,7 @@
self.space.getindex_w, w_instance2, self.space.w_IndexError)
try:
self.space.getindex_w(self.space.w_tuple, None, "foobar")
- except OperationError, e:
+ except OperationError as e:
assert e.match(self.space, self.space.w_TypeError)
assert "foobar" in e.errorstr(self.space)
else:
diff --git a/pypy/interpreter/test/test_syntax.py b/pypy/interpreter/test/test_syntax.py
--- a/pypy/interpreter/test/test_syntax.py
+++ b/pypy/interpreter/test/test_syntax.py
@@ -151,7 +151,7 @@
space.wrap(s),
space.wrap('?'),
space.wrap('exec'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_SyntaxError):
raise
else:
@@ -727,7 +727,7 @@
for s in VALID:
try:
compile(s, '?', 'exec')
- except Exception, e:
+ except Exception as e:
print '-'*20, 'FAILED TO COMPILE:', '-'*20
print s
print '%s: %s' % (e.__class__, e)
@@ -735,7 +735,7 @@
for s in INVALID:
try:
raises(SyntaxError, compile, s, '?', 'exec')
- except Exception ,e:
+ except Exception as e:
print '-'*20, 'UNEXPECTEDLY COMPILED:', '-'*20
print s
print '%s: %s' % (e.__class__, e)
diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py
--- a/pypy/interpreter/test/test_zzpickle_and_slow.py
+++ b/pypy/interpreter/test/test_zzpickle_and_slow.py
@@ -561,7 +561,7 @@
def f(): yield 42
f().__reduce__()
""")
- except TypeError, e:
+ except TypeError as e:
if 'pickle generator' not in str(e):
raise
py.test.skip("Frames can't be __reduce__()-ed")
diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py
--- a/pypy/module/__builtin__/__init__.py
+++ b/pypy/module/__builtin__/__init__.py
@@ -88,7 +88,7 @@
space = self.space
try:
w_builtin = space.getitem(w_globals, space.wrap('__builtins__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
else:
diff --git a/pypy/module/__builtin__/abstractinst.py b/pypy/module/__builtin__/abstractinst.py
--- a/pypy/module/__builtin__/abstractinst.py
+++ b/pypy/module/__builtin__/abstractinst.py
@@ -18,7 +18,7 @@
"""
try:
w_bases = space.getattr(w_cls, space.wrap('__bases__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise # propagate other errors
return None
@@ -38,7 +38,7 @@
def abstract_getclass(space, w_obj):
try:
return space.getattr(w_obj, space.wrap('__class__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise # propagate other errors
return space.type(w_obj)
@@ -60,7 +60,7 @@
w_result = space.isinstance_allow_override(w_obj, w_klass_or_tuple)
else:
w_result = space.isinstance(w_obj, w_klass_or_tuple)
- except OperationError, e: # if w_klass_or_tuple was not a type, ignore it
+ except OperationError as e: # if w_klass_or_tuple was not a type, ignore it
if not e.match(space, space.w_TypeError):
raise # propagate other errors
else:
@@ -78,7 +78,7 @@
w_klass_or_tuple)
else:
w_result = space.issubtype(w_pretendtype, w_klass_or_tuple)
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
return False # ignore most exceptions
@@ -95,7 +95,7 @@
" or tuple of classes and types")
try:
w_abstractclass = space.getattr(w_obj, space.wrap('__class__'))
- except OperationError, e:
+ except OperationError as e:
if e.async(space): # ignore most exceptions
raise
return False
@@ -135,7 +135,7 @@
w_klass_or_tuple)
else:
w_result = space.issubtype(w_derived, w_klass_or_tuple)
- except OperationError, e: # if one of the args was not a type, ignore it
+ except OperationError as e: # if one of the args was not a type, ignore it
if not e.match(space, space.w_TypeError):
raise # propagate other errors
else:
diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py
--- a/pypy/module/__builtin__/compiling.py
+++ b/pypy/module/__builtin__/compiling.py
@@ -113,7 +113,7 @@
try:
w_prep = space.getattr(w_meta, space.wrap("__prepare__"))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
w_namespace = space.newdict()
diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py
--- a/pypy/module/__builtin__/descriptor.py
+++ b/pypy/module/__builtin__/descriptor.py
@@ -95,7 +95,7 @@
else:
try:
w_type = space.getattr(w_obj_or_type, space.wrap('__class__'))
- except OperationError, o:
+ except OperationError as o:
if not o.match(space, space.w_AttributeError):
raise
w_type = w_objtype
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -146,7 +146,7 @@
jitdriver.jit_merge_point(has_key=has_key, has_item=has_item, w_type=w_type)
try:
w_item = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
@@ -322,7 +322,7 @@
w_index = space.wrap(self.remaining)
try:
w_item = space.getitem(self.w_sequence, w_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
else:
@@ -687,7 +687,7 @@
for iterable_w in args_w:
try:
iterator_w = space.iter(iterable_w)
- except OperationError, e:
+ except OperationError as e:
if e.match(self.space, self.space.w_TypeError):
raise OperationError(space.w_TypeError, space.wrap(self._error_name + " argument #" + str(i + 1) + " must support iteration"))
else:
diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py
--- a/pypy/module/__builtin__/operation.py
+++ b/pypy/module/__builtin__/operation.py
@@ -62,7 +62,7 @@
w_name = checkattrname(space, w_name)
try:
return space.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
if w_defvalue is not None:
if e.match(space, space.w_AttributeError):
return w_defvalue
@@ -74,7 +74,7 @@
w_name = checkattrname(space, w_name)
try:
space.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
return space.w_False
raise
@@ -149,7 +149,7 @@
is exhausted, it is returned instead of raising StopIteration."""
try:
return space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if w_default is not None and e.match(space, space.w_StopIteration):
return w_default
raise
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -96,7 +96,7 @@
def validate_fd(space, fd):
try:
rposix.validate_fd(fd)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
@unwrap_spec(sizehint=int)
diff --git a/pypy/module/__pypy__/interp_stderrprinter.py b/pypy/module/__pypy__/interp_stderrprinter.py
--- a/pypy/module/__pypy__/interp_stderrprinter.py
+++ b/pypy/module/__pypy__/interp_stderrprinter.py
@@ -29,7 +29,7 @@
def descr_isatty(self, space):
try:
res = os.isatty(self.fd)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
return space.wrap(res)
@@ -39,7 +39,7 @@
try:
n = os.write(self.fd, data)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EAGAIN:
return space.w_None
raise wrap_oserror(space, e)
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -113,7 +113,7 @@
must_leave = space.threadlocals.try_enter_thread(space)
self.py_invoke(ll_res, ll_args)
#
- except Exception, e:
+ except Exception as e:
# oups! last-level attempt to recover.
try:
os.write(STDERR, "SystemError: callback raised ")
@@ -143,7 +143,7 @@
w_res = space.call(self.w_callable, w_args)
extra_line = "Trying to convert the result back to C:\n"
self.convert_result(ll_res, w_res)
- except OperationError, e:
+ except OperationError as e:
self.handle_applevel_exception(e, ll_res, extra_line)
@jit.unroll_safe
@@ -188,7 +188,7 @@
w_res = space.call_function(self.w_onerror, w_t, w_v, w_tb)
if not space.is_none(w_res):
self.convert_result(ll_res, w_res)
- except OperationError, e2:
+ except OperationError as e2:
# double exception! print a double-traceback...
self.print_error(e, extra_line) # original traceback
e2.write_unraisable(space, '', with_traceback=True,
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -247,7 +247,7 @@
for i in range(length):
try:
w_item = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise oefmt(space.w_ValueError,
@@ -256,7 +256,7 @@
target = rffi.ptradd(target, ctitemsize)
try:
space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
else:
diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py
--- a/pypy/module/_cffi_backend/cdlopen.py
+++ b/pypy/module/_cffi_backend/cdlopen.py
@@ -21,7 +21,7 @@
filename = ""
try:
handle = dlopen(ll_libname, flags)
- except DLOpenError, e:
+ except DLOpenError as e:
raise wrap_dlopenerror(ffi.space, e, filename)
W_LibObject.__init__(self, ffi, filename)
self.libhandle = handle
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -50,7 +50,7 @@
builder = CifDescrBuilder(fargs, fresult, abi)
try:
builder.rawallocate(self)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_NotImplementedError):
raise
# else, eat the NotImplementedError. We will get the
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -177,12 +177,12 @@
space = self.space
try:
fieldname = space.str_w(w_field_or_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
try:
index = space.int_w(w_field_or_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
raise OperationError(space.w_TypeError,
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -388,6 +388,6 @@
mode = space.str_w(space.getattr(w_fileobj, space.wrap("mode")))
try:
w_fileobj.cffi_fileobj = CffiFileObj(fd, mode)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf)
diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py
--- a/pypy/module/_cffi_backend/embedding.py
+++ b/pypy/module/_cffi_backend/embedding.py
@@ -79,7 +79,7 @@
patch_sys(space)
load_embedded_cffi_module(space, version, init_struct)
res = 0
- except OperationError, operr:
+ except OperationError as operr:
operr.write_unraisable(space, "initialization of '%s'" % name,
with_traceback=True)
space.appexec([], r"""():
@@ -91,7 +91,7 @@
res = -1
if must_leave:
space.threadlocals.leave_thread(space)
- except Exception, e:
+ except Exception as e:
# oups! last-level attempt to recover.
try:
os.write(STDERR, "From initialization of '")
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -109,7 +109,7 @@
# w.r.t. buffers and memoryviews??
try:
buf = space.readbuf_w(w_x)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
buf = space.buffer_w(w_x, space.BUF_SIMPLE)
@@ -118,7 +118,7 @@
def _fetch_as_write_buffer(space, w_x):
try:
buf = space.writebuf_w(w_x)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
buf = space.buffer_w(w_x, space.BUF_WRITABLE)
diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py
--- a/pypy/module/_cffi_backend/lib_obj.py
+++ b/pypy/module/_cffi_backend/lib_obj.py
@@ -39,7 +39,7 @@
mod = __import__(modname, None, None, ['ffi', 'lib'])
return mod.lib""")
lib1 = space.interp_w(W_LibObject, w_lib1)
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
raise oefmt(space.w_ImportError,
diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py
--- a/pypy/module/_cffi_backend/libraryobj.py
+++ b/pypy/module/_cffi_backend/libraryobj.py
@@ -24,7 +24,7 @@
filename = ""
try:
self.handle = dlopen(ll_libname, flags)
- except DLOpenError, e:
+ except DLOpenError as e:
raise wrap_dlopenerror(space, e, filename)
self.name = filename
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -130,7 +130,7 @@
# other types of objects. It refuses floats.
try:
value = space.int_w(w_ob)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(space, space.w_OverflowError) or
e.match(space, space.w_TypeError)):
raise
@@ -138,7 +138,7 @@
return value
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if _is_a_float(space, w_ob):
@@ -155,7 +155,7 @@
return space.int_w(w_ob)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if _is_a_float(space, w_ob):
@@ -173,7 +173,7 @@
# mask the result and round floats.
try:
value = space.int_w(w_ob)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(space, space.w_OverflowError) or
e.match(space, space.w_TypeError)):
raise
@@ -183,7 +183,7 @@
return r_ulonglong(value)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if strict and _is_a_float(space, w_ob):
@@ -203,7 +203,7 @@
# same as as_unsigned_long_long(), but returning just an Unsigned
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if strict and _is_a_float(space, w_ob):
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -67,7 +67,7 @@
w_replace, w_newpos = space.fixedview(w_res, 2)
try:
newpos = space.int_w(w_newpos)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_OverflowError):
raise
newpos = -1
@@ -201,7 +201,7 @@
w_start = space.getattr(w_exc, space.wrap('start'))
w_end = space.getattr(w_exc, space.wrap('end'))
w_obj = space.getattr(w_exc, space.wrap('object'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
raise OperationError(space.w_TypeError, space.wrap(
@@ -657,7 +657,7 @@
else:
try:
w_ch = space.getitem(self.w_mapping, space.newint(ord(ch)))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_LookupError):
raise
return errorchar
@@ -690,7 +690,7 @@
# get the character from the mapping
try:
w_ch = space.getitem(self.w_mapping, space.newint(ord(ch)))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_LookupError):
raise
return errorchar
@@ -769,7 +769,7 @@
space = self.space
try:
w_code = space.call_function(self.w_getcode, space.wrap(name))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
return -1
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -169,7 +169,7 @@
while True:
try:
w_obj = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
@@ -191,7 +191,7 @@
while True:
try:
w_obj = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py
--- a/pypy/module/_continuation/interp_continuation.py
+++ b/pypy/module/_continuation/interp_continuation.py
@@ -224,7 +224,7 @@
try:
frame = self.bottomframe
w_result = frame.execute_frame()
- except Exception, e:
+ except Exception as e:
global_state.propagate_exception = e
else:
global_state.w_value = w_result
diff --git a/pypy/module/_continuation/interp_pickle.py b/pypy/module/_continuation/interp_pickle.py
--- a/pypy/module/_continuation/interp_pickle.py
+++ b/pypy/module/_continuation/interp_pickle.py
@@ -69,7 +69,7 @@
try:
w_result = post_switch(sthread, h)
operr = None
- except OperationError, e:
+ except OperationError as e:
w_result = None
operr = e
#
@@ -88,7 +88,7 @@
try:
w_result = frame.execute_frame(w_result, operr)
operr = None
- except OperationError, e:
+ except OperationError as e:
w_result = None
operr = e
if exit_continulet is not None:
@@ -97,7 +97,7 @@
sthread.ec.topframeref = jit.vref_None
if operr:
raise operr
- except Exception, e:
+ except Exception as e:
global_state.propagate_exception = e
else:
global_state.w_value = w_result
diff --git a/pypy/module/_continuation/test/support.py b/pypy/module/_continuation/test/support.py
--- a/pypy/module/_continuation/test/support.py
+++ b/pypy/module/_continuation/test/support.py
@@ -8,6 +8,6 @@
def setup_class(cls):
try:
import rpython.rlib.rstacklet
- except CompilationError, e:
+ except CompilationError as e:
py.test.skip("cannot import rstacklet: %s" % e)
diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py
--- a/pypy/module/_csv/interp_reader.py
+++ b/pypy/module/_csv/interp_reader.py
@@ -59,7 +59,7 @@
while True:
try:
w_line = space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
if (field_builder is not None and
state != START_RECORD and state != EAT_CRNL and
diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py
--- a/pypy/module/_csv/interp_writer.py
+++ b/pypy/module/_csv/interp_writer.py
@@ -49,7 +49,7 @@
try:
space.float_w(w_field) # is it an int/long/float?
quoted = False
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
quoted = True
@@ -124,7 +124,7 @@
while True:
try:
w_seq = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -28,7 +28,7 @@
space = global_name_fetcher.space
w_name = space.wrap(rffi.charp2str(obj_name[0].c_name))
global_name_fetcher.meth_names.append(w_name)
- except OperationError, e:
+ except OperationError as e:
global_name_fetcher.w_error = e
class NameFetcher:
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -217,7 +217,7 @@
typename = space.type(self).name.decode('utf-8')
try:
w_name = space.getattr(self, space.wrap("name"))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_Exception):
raise
return space.wrap(u"<%s>" % (typename,))
@@ -359,7 +359,7 @@
while True:
try:
w_written = space.call_method(self.w_raw, "write", w_data)
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue # try again
raise
@@ -535,7 +535,7 @@
while True:
try:
w_size = space.call_method(self.w_raw, "readinto", w_buf)
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue # try again
raise
@@ -742,7 +742,7 @@
# First write the current buffer
try:
self._writer_flush_unlocked(space)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_BlockingIOError):
raise
if self.readable:
diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py
--- a/pypy/module/_io/interp_fileio.py
+++ b/pypy/module/_io/interp_fileio.py
@@ -149,7 +149,7 @@
fd = -1
try:
fd = space.c_int_w(w_name)
- except OperationError, e:
+ except OperationError as e:
pass
else:
if fd < 0:
@@ -163,7 +163,7 @@
if fd >= 0:
try:
os.fstat(fd)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EBADF:
raise wrap_oserror(space, e)
# else: pass
@@ -180,7 +180,7 @@
try:
self.fd = dispatch_filename(rposix.open)(
space, w_name, flags, 0666)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror2(space, e, w_name,
exception_name='w_IOError')
finally:
@@ -205,7 +205,7 @@
# (otherwise, it might be done only on the first write()).
try:
os.lseek(self.fd, 0, os.SEEK_END)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e, exception_name='w_IOError')
except:
if not fd_is_own:
@@ -259,7 +259,7 @@
try:
os.close(fd)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e,
exception_name='w_IOError')
@@ -307,7 +307,7 @@
self._check_closed(space)
try:
pos = os.lseek(self.fd, pos, whence)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e,
exception_name='w_IOError')
return space.wrap(pos)
@@ -316,7 +316,7 @@
self._check_closed(space)
try:
pos = os.lseek(self.fd, 0, 1)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e,
exception_name='w_IOError')
return space.wrap(pos)
@@ -350,7 +350,7 @@
self._check_closed(space)
try:
res = os.isatty(self.fd)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e, exception_name='w_IOError')
return space.wrap(res)
@@ -377,7 +377,7 @@
try:
n = os.write(self.fd, data)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EAGAIN:
return space.w_None
raise wrap_oserror(space, e,
@@ -395,7 +395,7 @@
try:
s = os.read(self.fd, size)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EAGAIN:
return space.w_None
raise wrap_oserror(space, e,
@@ -410,7 +410,7 @@
length = rwbuffer.getlength()
try:
buf = os.read(self.fd, length)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EAGAIN:
return space.w_None
raise wrap_oserror(space, e,
@@ -429,7 +429,7 @@
try:
chunk = os.read(self.fd, newsize - total)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EINTR:
space.getexecutioncontext().checksignals()
continue
@@ -463,7 +463,7 @@
try:
self._truncate(space.r_longlong_w(w_size))
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e, exception_name='w_IOError')
return w_size
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -203,7 +203,7 @@
if has_peek:
try:
w_readahead = space.call_method(self, "peek", space.wrap(1))
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue
raise
@@ -233,7 +233,7 @@
try:
w_read = space.call_method(self, "read", space.wrap(nreadahead))
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue
raise
@@ -283,14 +283,14 @@
while True:
try:
w_line = space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break # done
while True:
try:
space.call_method(self, "write", w_line)
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue
raise
@@ -351,7 +351,7 @@
try:
w_data = space.call_method(self, "read",
space.wrap(DEFAULT_BUFFER_SIZE))
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue
raise
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -623,7 +623,7 @@
self.telling = False
try:
return W_TextIOBase.next_w(self, space)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
self.telling = self.seekable
raise
@@ -663,7 +663,7 @@
if not self._read_chunk(space):
# EOF
break
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue
raise
@@ -690,7 +690,7 @@
if not self._read_chunk(space):
has_data = False
break
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue
raise
@@ -834,7 +834,7 @@
try:
space.call_method(self.w_buffer, "write",
space.wrapbytes(pending_bytes))
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue
raise
diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py
--- a/pypy/module/_locale/interp_locale.py
+++ b/pypy/module/_locale/interp_locale.py
@@ -29,7 +29,7 @@
locale = space.str_w(w_locale)
try:
result = rlocale.setlocale(category, locale)
- except rlocale.LocaleError, e:
+ except rlocale.LocaleError as e:
raise rewrap_error(space, e)
return space.wrap(result)
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py
--- a/pypy/module/_lsprof/interp_lsprof.py
+++ b/pypy/module/_lsprof/interp_lsprof.py
@@ -310,7 +310,7 @@
return space.int_w(space.call_function(self.w_callable))
else:
return space.r_longlong_w(space.call_function(self.w_callable))
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(space, "timer function ",
self.w_callable)
return timer_size_int(0)
diff --git a/pypy/module/_minimal_curses/interp_curses.py b/pypy/module/_minimal_curses/interp_curses.py
--- a/pypy/module/_minimal_curses/interp_curses.py
+++ b/pypy/module/_minimal_curses/interp_curses.py
@@ -30,14 +30,14 @@
# NOT_RPYTHON
try:
_curses.setupterm(None, fd)
- except _curses.error, e:
+ except _curses.error as e:
raise curses_error(e.args[0])
def _curses_setupterm(termname, fd):
# NOT_RPYTHON
try:
_curses.setupterm(termname, fd)
- except _curses.error, e:
+ except _curses.error as e:
raise curses_error(e.args[0])
@unwrap_spec(fd=int)
@@ -52,7 +52,7 @@
_curses_setupterm_null(fd)
else:
_curses_setupterm(space.str_w(w_termname), fd)
- except curses_error, e:
+ except curses_error as e:
raise convert_error(space, e)
class TermError(Exception):
@@ -62,7 +62,7 @@
# NOT_RPYTHON
try:
res = _curses.tigetstr(capname)
- except _curses.error, e:
+ except _curses.error as e:
raise curses_error(e.args[0])
if res is None:
raise TermError
@@ -72,7 +72,7 @@
# NOT_RPYTHON
try:
return _curses.tparm(s, *args)
- except _curses.error, e:
+ except _curses.error as e:
raise curses_error(e.args[0])
@unwrap_spec(capname=str)
@@ -81,7 +81,7 @@
result = _curses_tigetstr(capname)
except TermError:
return space.w_None
- except curses_error, e:
+ except curses_error as e:
raise convert_error(space, e)
return space.wrapbytes(result)
@@ -90,5 +90,5 @@
args = [space.int_w(a) for a in args_w]
try:
return space.wrapbytes(_curses_tparm(s, args))
- except curses_error, e:
+ except curses_error as e:
raise convert_error(space, e)
diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py
--- a/pypy/module/_multibytecodec/interp_incremental.py
+++ b/pypy/module/_multibytecodec/interp_incremental.py
@@ -57,7 +57,7 @@
output = c_codecs.decodeex(self.decodebuf, object, self.errors,
state.decode_error_handler, self.name,
get_ignore_error(final))
- except c_codecs.EncodeDecodeError, e:
+ except c_codecs.EncodeDecodeError as e:
raise wrap_unicodedecodeerror(space, e, object, self.name)
except RuntimeError:
raise wrap_runtimeerror(space)
@@ -105,7 +105,7 @@
output = c_codecs.encodeex(self.encodebuf, object, self.errors,
state.encode_error_handler, self.name,
get_ignore_error(final))
- except c_codecs.EncodeDecodeError, e:
+ except c_codecs.EncodeDecodeError as e:
raise wrap_unicodeencodeerror(space, e, object, self.name)
except RuntimeError:
raise wrap_runtimeerror(space)
diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py
--- a/pypy/module/_multibytecodec/interp_multibytecodec.py
+++ b/pypy/module/_multibytecodec/interp_multibytecodec.py
@@ -20,7 +20,7 @@
try:
output = c_codecs.decode(self.codec, input, errors,
state.decode_error_handler, self.name)
- except c_codecs.EncodeDecodeError, e:
+ except c_codecs.EncodeDecodeError as e:
raise wrap_unicodedecodeerror(space, e, input, self.name)
except RuntimeError:
raise wrap_runtimeerror(space)
@@ -36,7 +36,7 @@
try:
output = c_codecs.encode(self.codec, input, errors,
state.encode_error_handler, self.name)
- except c_codecs.EncodeDecodeError, e:
+ except c_codecs.EncodeDecodeError as e:
raise wrap_unicodeencodeerror(space, e, input, self.name)
except RuntimeError:
raise wrap_runtimeerror(space)
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -318,7 +318,7 @@
data = rffi.charpsize2str(message, size)
try:
count = self.WRITE(data)
- except OSError, e:
+ except OSError as e:
if e.errno == EINTR:
space.getexecutioncontext().checksignals()
continue
@@ -332,7 +332,7 @@
while remaining > 0:
try:
data = self.READ(remaining)
- except OSError, e:
+ except OSError as e:
if e.errno == EINTR:
space.getexecutioncontext().checksignals()
continue
diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py
--- a/pypy/module/_multiprocessing/interp_semaphore.py
+++ b/pypy/module/_multiprocessing/interp_semaphore.py
@@ -361,7 +361,7 @@
sem_wait(self.handle)
else:
sem_timedwait(self.handle, deadline)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EINTR:
# again
continue
@@ -386,7 +386,7 @@
# make sure that already locked
try:
sem_trywait(self.handle)
- except OSError, e:
+ except OSError as e:
if e.errno != errno.EAGAIN:
raise
# it is already locked as expected
@@ -422,7 +422,7 @@
if HAVE_BROKEN_SEM_GETVALUE:
try:
sem_trywait(self.handle)
- except OSError, e:
+ except OSError as e:
if e.errno != errno.EAGAIN:
raise
return True
@@ -459,14 +459,14 @@
def is_zero(self, space):
try:
res = semlock_iszero(self, space)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
return space.wrap(res)
def get_value(self, space):
try:
val = semlock_getvalue(self, space)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
return space.wrap(val)
@@ -479,7 +479,7 @@
try:
got = semlock_acquire(self, space, block, w_timeout)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
if got:
@@ -502,7 +502,7 @@
try:
semlock_release(self, space)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
self.count -= 1
@@ -536,7 +536,7 @@
try:
handle = create_semaphore(space, name, value, maxvalue)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
self = space.allocate_instance(W_SemLock, w_subtype)
diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py
--- a/pypy/module/_posixsubprocess/interp_subprocess.py
+++ b/pypy/module/_posixsubprocess/interp_subprocess.py
@@ -173,7 +173,7 @@
try:
try:
pid = os.fork()
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
if pid == 0:
diff --git a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py
--- a/pypy/module/_pypyjson/targetjson.py
+++ b/pypy/module/_pypyjson/targetjson.py
@@ -126,7 +126,7 @@
try:
bench('loads ', N, myloads, msg)
- except OperationError, e:
+ except OperationError as e:
print 'Error', e._compute_value(fakespace)
return 0
diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py
--- a/pypy/module/_rawffi/alt/interp_funcptr.py
+++ b/pypy/module/_rawffi/alt/interp_funcptr.py
@@ -114,7 +114,7 @@
func_caller = CallFunctionConverter(space, self.func, argchain)
try:
return func_caller.do_and_wrap(self.w_restype)
- except StackCheckError, e:
+ except StackCheckError as e:
raise OperationError(space.w_ValueError, space.wrap(e.message))
#return self._do_call(space, argchain)
@@ -324,7 +324,7 @@
self.name = name
try:
self.cdll = libffi.CDLL(name, mode)
- except DLOpenError, e:
+ except DLOpenError as e:
raise wrap_dlopenerror(space, e, self.name)
def getfunc(self, space, w_name, w_argtypes, w_restype):
@@ -374,6 +374,6 @@
def get_libc(space):
try:
return space.wrap(W_CDLL(space, get_libc_name(), -1))
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py
--- a/pypy/module/_rawffi/array.py
+++ b/pypy/module/_rawffi/array.py
@@ -112,7 +112,7 @@
def descr_setitem(self, space, w_index, w_value):
try:
num = space.int_w(w_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
self.setslice(space, w_index, w_value)
@@ -130,7 +130,7 @@
def descr_getitem(self, space, w_index):
try:
num = space.int_w(w_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
return self.getslice(space, w_index)
diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py
--- a/pypy/module/_rawffi/callback.py
+++ b/pypy/module/_rawffi/callback.py
From pypy.commits at gmail.com Mon May 2 20:01:03 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 17:01:03 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: hg merge default
Message-ID: <5727ea3f.e873c20a.1aab6.2fe7@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r84152:ecb21bf4a68d
Date: 2016-05-03 00:59 +0100
http://bitbucket.org/pypy/pypy/changeset/ecb21bf4a68d/
Log: hg merge default
diff too long, truncating to 2000 out of 3805 lines
diff --git a/TODO b/TODO
deleted file mode 100644
--- a/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-* reduce size of generated c code from slot definitions in slotdefs.
-* remove broken DEBUG_REFCOUNT from pyobject.py
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
help="output format")
options, args = parser.parse_args()
if len(args) != 1:
- raise ValueError, "need exactly one argument"
+ raise ValueError("need exactly one argument")
epsfile = process_dot(py.path.local(args[0]))
if options.format == "ps" or options.format == "eps":
print epsfile.read()
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -27,7 +27,6 @@
self.space = space
assert isinstance(args_w, list)
self.arguments_w = args_w
-
self.keywords = keywords
self.keywords_w = keywords_w
self.keyword_names_w = keyword_names_w # matches the tail of .keywords
@@ -137,11 +136,11 @@
"""The simplest argument parsing: get the 'argcount' arguments,
or raise a real ValueError if the length is wrong."""
if self.keywords:
- raise ValueError, "no keyword arguments expected"
+ raise ValueError("no keyword arguments expected")
if len(self.arguments_w) > argcount:
- raise ValueError, "too many arguments (%d expected)" % argcount
+ raise ValueError("too many arguments (%d expected)" % argcount)
elif len(self.arguments_w) < argcount:
- raise ValueError, "not enough arguments (%d expected)" % argcount
+ raise ValueError("not enough arguments (%d expected)" % argcount)
return self.arguments_w
def firstarg(self):
@@ -520,7 +519,6 @@
msg = "takes %s but %s given" % (takes_str, given_str)
return msg
-
class ArgErrMultipleValues(ArgErr):
def __init__(self, argname):
@@ -530,7 +528,6 @@
msg = "got multiple values for argument '%s'" % self.argname
return msg
-
class ArgErrUnknownKwds(ArgErr):
def __init__(self, space, num_remainingkwds, keywords, kwds_mapping,
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -96,7 +96,7 @@
def t_default(self, s):
r" . +"
- raise ValueError, "unmatched input: %s" % `s`
+ raise ValueError("unmatched input: %s" % `s`)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -476,7 +476,6 @@
def getbuiltinmodule(self, name, force_init=False, reuse=True):
w_name = self.wrap(name)
w_modules = self.sys.get('modules')
-
if not force_init:
assert reuse
try:
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -550,7 +550,7 @@
where the order is according to self.pycode.signature()."""
scope_len = len(scope_w)
if scope_len > self.pycode.co_nlocals:
- raise ValueError, "new fastscope is longer than the allocated area"
+ raise ValueError("new fastscope is longer than the allocated area")
# don't assign directly to 'locals_cells_stack_w[:scope_len]' to be
# virtualizable-friendly
for i in range(scope_len):
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1098,7 +1098,7 @@
return next_instr
def FOR_LOOP(self, oparg, next_instr):
- raise BytecodeCorruption, "old opcode, no longer in use"
+ raise BytecodeCorruption("old opcode, no longer in use")
def SETUP_LOOP(self, offsettoend, next_instr):
block = LoopBlock(self.valuestackdepth,
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -152,17 +152,6 @@
class InvalidPointerException(Exception):
pass
-DEBUG_REFCOUNT = False
-
-def debug_refcount(*args, **kwargs):
- frame_stackdepth = kwargs.pop("frame_stackdepth", 2)
- assert not kwargs
- frame = sys._getframe(frame_stackdepth)
- print >>sys.stderr, "%25s" % (frame.f_code.co_name, ),
- for arg in args:
- print >>sys.stderr, arg,
- print >>sys.stderr
-
def create_ref(space, w_obj, itemcount=0):
"""
Allocates a PyObject, and fills its fields with info from the given
@@ -192,10 +181,6 @@
# XXX looks like a PyObject_GC_TRACK
assert py_obj.c_ob_refcnt < rawrefcount.REFCNT_FROM_PYPY
py_obj.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY
- if DEBUG_REFCOUNT:
- debug_refcount("MAKREF", py_obj, w_obj)
- assert w_obj
- assert py_obj
rawrefcount.create_link_pypy(w_obj, py_obj)
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -741,4 +741,4 @@
elif _name not in ['is_', 'id','type','issubtype', 'int',
# not really to be defined in DescrOperation
'ord', 'unichr', 'unicode']:
- raise Exception, "missing def for operation %s" % _name
+ raise Exception("missing def for operation %s" % _name)
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -749,9 +749,9 @@
return None
class IterClassItems(BaseItemIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiteritems_with_hash(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiteritems_with_hash(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
if override_next_item is not None:
next_item_entry = override_next_item
@@ -764,9 +764,9 @@
return None, None
class IterClassReversed(BaseKeyIterator):
- def __init__(self, space, strategy, impl):
- self.iterator = strategy.getiterreversed(impl)
- BaseIteratorImplementation.__init__(self, space, strategy, impl)
+ def __init__(self, space, strategy, w_dict):
+ self.iterator = strategy.getiterreversed(w_dict)
+ BaseIteratorImplementation.__init__(self, space, strategy, w_dict)
def next_key_entry(self):
for key in self.iterator:
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -838,8 +838,7 @@
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_key_entry(self):
- implementation = self.w_dict
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
@@ -860,8 +859,7 @@
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_value_entry(self):
- implementation = self.w_dict
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
@@ -881,8 +879,7 @@
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_item_entry(self):
- implementation = self.w_dict
- assert isinstance(implementation.get_strategy(), MapDictStrategy)
+ assert isinstance(self.w_dict.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None, None
if self.curr_map:
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -142,7 +142,7 @@
if x is None:
return self.w_None
if isinstance(x, OperationError):
- raise TypeError, ("attempt to wrap already wrapped exception: %s"%
+ raise TypeError("attempt to wrap already wrapped exception: %s"%
(x,))
if isinstance(x, int):
if isinstance(x, bool):
diff --git a/pypy/tool/dis3.py b/pypy/tool/dis3.py
--- a/pypy/tool/dis3.py
+++ b/pypy/tool/dis3.py
@@ -44,9 +44,8 @@
elif isinstance(x, str):
disassemble_string(x)
else:
- raise TypeError, \
- "don't know how to disassemble %s objects" % \
- type(x).__name__
+ raise TypeError("don't know how to disassemble %s objects" % \
+ type(x).__name__)
def distb(tb=None):
"""Disassemble a traceback (default: last traceback)."""
@@ -54,7 +53,7 @@
try:
tb = sys.last_traceback
except AttributeError:
- raise RuntimeError, "no last traceback to disassemble"
+ raise RuntimeError("no last traceback to disassemble")
while tb.tb_next: tb = tb.tb_next
disassemble(tb.tb_frame.f_code, tb.tb_lasti)
diff --git a/pypy/tool/importfun.py b/pypy/tool/importfun.py
--- a/pypy/tool/importfun.py
+++ b/pypy/tool/importfun.py
@@ -163,7 +163,7 @@
if name in opcode.opmap:
return opcode.opmap[name]
else:
- raise AttributeError, name
+ raise AttributeError(name)
_op_ = _Op()
diff --git a/pypy/tool/isolate.py b/pypy/tool/isolate.py
--- a/pypy/tool/isolate.py
+++ b/pypy/tool/isolate.py
@@ -50,7 +50,7 @@
if exc_type_module == 'exceptions':
raise getattr(exceptions, exc_type_name)
else:
- raise IsolateException, "%s.%s" % value
+ raise IsolateException("%s.%s" % value)
def _close(self):
if not self._closed:
diff --git a/pypy/tool/pydis.py b/pypy/tool/pydis.py
--- a/pypy/tool/pydis.py
+++ b/pypy/tool/pydis.py
@@ -96,8 +96,8 @@
for bytecode in self.bytecodes:
if bytecode.index == index:
return bytecode
- raise ValueError, "no bytecode found on index %s in code \n%s" % (
- index, pydis(self.code))
+ raise ValueError("no bytecode found on index %s in code \n%s" % (
+ index, pydis(self.code)))
def format(self):
lastlineno = -1
diff --git a/pypy/tool/rest/rst.py b/pypy/tool/rest/rst.py
--- a/pypy/tool/rest/rst.py
+++ b/pypy/tool/rest/rst.py
@@ -128,7 +128,7 @@
outcome = []
if (isinstance(self.children[0], Transition) or
isinstance(self.children[-1], Transition)):
- raise ValueError, ('document must not begin or end with a '
+ raise ValueError('document must not begin or end with a '
'transition')
for child in self.children:
outcome.append(child.text())
diff --git a/pypy/tool/test/isolate_simple.py b/pypy/tool/test/isolate_simple.py
--- a/pypy/tool/test/isolate_simple.py
+++ b/pypy/tool/test/isolate_simple.py
@@ -3,13 +3,13 @@
return a+b
def g():
- raise ValueError, "booh"
+ raise ValueError("booh")
class FancyException(Exception):
pass
def h():
- raise FancyException, "booh"
+ raise FancyException("booh")
def bomb():
raise KeyboardInterrupt
diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py
--- a/rpython/annotator/annrpython.py
+++ b/rpython/annotator/annrpython.py
@@ -345,10 +345,10 @@
del self.blocked_blocks[block]
try:
self.flowin(graph, block)
- except BlockedInference, e:
+ except BlockedInference as e:
self.annotated[block] = False # failed, hopefully temporarily
self.blocked_blocks[block] = (graph, e.opindex)
- except Exception, e:
+ except Exception as e:
# hack for debug tools only
if not hasattr(e, '__annotator_block'):
setattr(e, '__annotator_block', block)
@@ -382,7 +382,7 @@
oldcells = [self.binding(a) for a in block.inputargs]
try:
unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)]
- except annmodel.UnionError, e:
+ except annmodel.UnionError as e:
# Add source code to the UnionError
e.source = '\n'.join(source_lines(graph, block, None, long=True))
raise
diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py
--- a/rpython/annotator/description.py
+++ b/rpython/annotator/description.py
@@ -278,7 +278,7 @@
defs_s.append(self.bookkeeper.immutablevalue(x))
try:
inputcells = args.match_signature(signature, defs_s)
- except ArgErr, e:
+ except ArgErr as e:
raise AnnotatorError("signature mismatch: %s() %s" %
(self.name, e.getmsg()))
return inputcells
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -902,7 +902,7 @@
def f(l):
try:
l[0]
- except (KeyError, IndexError),e:
+ except (KeyError, IndexError) as e:
return e
return None
diff --git a/rpython/bin/translatorshell.py b/rpython/bin/translatorshell.py
--- a/rpython/bin/translatorshell.py
+++ b/rpython/bin/translatorshell.py
@@ -61,7 +61,7 @@
if __name__ == '__main__':
try:
setup_readline()
- except ImportError, err:
+ except ImportError as err:
print "Disabling readline support (%s)" % err
from rpython.translator.test import snippet
from rpython.rtyper.rtyper import RPythonTyper
diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py
--- a/rpython/flowspace/model.py
+++ b/rpython/flowspace/model.py
@@ -677,7 +677,7 @@
assert len(allexitcases) == len(block.exits)
vars_previous_blocks.update(vars)
- except AssertionError, e:
+ except AssertionError as e:
# hack for debug tools only
#graph.show() # <== ENABLE THIS TO SEE THE BROKEN GRAPH
if block and not hasattr(e, '__annotator_block'):
diff --git a/rpython/jit/backend/arm/test/support.py b/rpython/jit/backend/arm/test/support.py
--- a/rpython/jit/backend/arm/test/support.py
+++ b/rpython/jit/backend/arm/test/support.py
@@ -67,7 +67,7 @@
func(*args, **kwargs)
try:
f_name = name[:name.index('_')]
- except ValueError, e:
+ except ValueError as e:
f_name = name
self.assert_equal('%s%s %s' % (f_name, asm_ext, asm))
return f
diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py
--- a/rpython/jit/backend/detect_cpu.py
+++ b/rpython/jit/backend/detect_cpu.py
@@ -35,7 +35,7 @@
if not getdefined(macro, ''):
continue
return k
- raise ProcessorAutodetectError, "Cannot detect processor using compiler macros"
+ raise ProcessorAutodetectError("Cannot detect processor using compiler macros")
def detect_model_from_host_platform():
@@ -52,7 +52,7 @@
# assume we have 'uname'
mach = os.popen('uname -m', 'r').read().strip()
if not mach:
- raise ProcessorAutodetectError, "cannot run 'uname -m'"
+ raise ProcessorAutodetectError("cannot run 'uname -m'")
#
result ={'i386': MODEL_X86,
'i486': MODEL_X86,
@@ -74,7 +74,7 @@
}.get(mach)
if result is None:
- raise ProcessorAutodetectError, "unknown machine name %s" % mach
+ raise ProcessorAutodetectError("unknown machine name %s" % mach)
#
if result.startswith('x86'):
from rpython.jit.backend.x86 import detect_feature as feature
@@ -128,7 +128,7 @@
elif backend_name == MODEL_S390_64:
return "rpython.jit.backend.zarch.runner", "CPU_S390_64"
else:
- raise ProcessorAutodetectError, (
+ raise ProcessorAutodetectError(
"we have no JIT backend for this cpu: '%s'" % backend_name)
def getcpuclass(backend_name="auto"):
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py
--- a/rpython/jit/backend/llgraph/runner.py
+++ b/rpython/jit/backend/llgraph/runner.py
@@ -404,7 +404,7 @@
try:
frame.execute(lltrace)
assert False
- except ExecutionFinished, e:
+ except ExecutionFinished as e:
return e.deadframe
def get_value_direct(self, deadframe, tp, index):
@@ -1097,7 +1097,7 @@
execute = getattr(self, 'execute_' + op.getopname())
try:
resval = execute(_getdescr(op), *args)
- except Jump, j:
+ except Jump as j:
self.lltrace, i = j.jump_target
if i >= 0:
label_op = self.lltrace.operations[i]
@@ -1348,7 +1348,7 @@
try:
res = self.cpu.maybe_on_top_of_llinterp(func, call_args, TP.RESULT)
self.last_exception = None
- except LLException, lle:
+ except LLException as lle:
self.last_exception = lle
res = _example_res[getkind(TP.RESULT)[0]]
return res
@@ -1444,7 +1444,7 @@
assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish
try:
result = assembler_helper_ptr(pframe, vable)
- except LLException, lle:
+ except LLException as lle:
assert self.last_exception is None, "exception left behind"
self.last_exception = lle
# fish op
diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py
--- a/rpython/jit/backend/llsupport/llmodel.py
+++ b/rpython/jit/backend/llsupport/llmodel.py
@@ -144,7 +144,7 @@
# all other fields are empty
llop.gc_writebarrier(lltype.Void, new_frame)
return lltype.cast_opaque_ptr(llmemory.GCREF, new_frame)
- except Exception, e:
+ except Exception as e:
print "Unhandled exception", e, "in realloc_frame"
return lltype.nullptr(llmemory.GCREF.TO)
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -176,7 +176,7 @@
cls.cbuilder = compile(get_entry(allfuncs), cls.gc,
gcrootfinder=cls.gcrootfinder, jit=True,
thread=True)
- except ConfigError, e:
+ except ConfigError as e:
assert str(e).startswith('invalid value asmgcc')
py.test.skip('asmgcc not supported')
finally:
diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py
@@ -34,7 +34,7 @@
try:
rvmprof.register_code_object_class(MyCode, get_name)
- except rvmprof.VMProfPlatformUnsupported, e:
+ except rvmprof.VMProfPlatformUnsupported as e:
py.test.skip(str(e))
def get_unique_id(code):
diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py
--- a/rpython/jit/backend/llsupport/test/ztranslation_test.py
+++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py
@@ -288,7 +288,7 @@
def main(i):
try:
myportal(i)
- except ImDone, e:
+ except ImDone as e:
return e.resvalue
# XXX custom fishing, depends on the exact env var and format
@@ -297,7 +297,7 @@
try:
res = self.meta_interp(main, [400])
assert res == main(400)
- except ConfigError,e:
+ except ConfigError as e:
assert str(e).startswith('invalid value asmgcc')
py.test.skip('asmgcc not supported')
finally:
diff --git a/rpython/jit/backend/ppc/form.py b/rpython/jit/backend/ppc/form.py
--- a/rpython/jit/backend/ppc/form.py
+++ b/rpython/jit/backend/ppc/form.py
@@ -48,7 +48,7 @@
def __call__(self, *args, **kw):
fieldvalues, sparefields = self.calc_fields(args, kw)
if sparefields:
- raise FormException, 'fields %s left'%sparefields
+ raise FormException('fields %s left'%sparefields)
self.assembler.insts.append(Instruction(fieldvalues))
@@ -72,7 +72,7 @@
self.boundtype = boundtype
for field in specializations:
if field not in fields:
- raise FormException, field
+ raise FormException(field)
def __get__(self, ob, cls=None):
if ob is None: return self
@@ -91,14 +91,14 @@
for fname, v in more_specializatons.iteritems():
field = self.fieldmap[fname]
if field not in self.fields:
- raise FormException, "don't know about '%s' here" % field
+ raise FormException("don't know about '%s' here" % field)
if isinstance(v, str):
ds[field] = self.fieldmap[v]
else:
ms[field] = v
s.update(ms)
if len(s) != len(self.specializations) + len(ms):
- raise FormException, "respecialization not currently allowed"
+ raise FormException("respecialization not currently allowed")
if ds:
fields = list(self.fields)
for field in ds:
@@ -175,8 +175,8 @@
overlap = True
for b in range(field.left, field.right+1):
if not overlap and b in bits:
- raise FormException, "'%s' and '%s' clash at bit '%s'"%(
- bits[b], fname, b)
+ raise FormException("'%s' and '%s' clash at bit '%s'"%(
+ bits[b], fname, b))
else:
bits[b] = fname
self.fields.append(field)
@@ -186,7 +186,7 @@
for fname in specializations:
field = self.fieldmap[fname]
if field not in self.fields:
- raise FormException, "no nothin bout '%s'"%fname
+ raise FormException("no nothin bout '%s'"%fname)
s[field] = specializations[fname]
return IDesc(self.fieldmap, self.fields, s)
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -780,7 +780,7 @@
return [SpaceOperation('-live-', [], None),
SpaceOperation('getfield_vable_%s' % kind,
[v_inst, descr], op.result)]
- except VirtualizableArrayField, e:
+ except VirtualizableArrayField as e:
# xxx hack hack hack
vinfo = e.args[1]
arrayindex = vinfo.array_field_counter[op.args[1].value]
diff --git a/rpython/jit/codewriter/policy.py b/rpython/jit/codewriter/policy.py
--- a/rpython/jit/codewriter/policy.py
+++ b/rpython/jit/codewriter/policy.py
@@ -103,7 +103,7 @@
getkind(v.concretetype, supports_floats,
supports_longlong,
supports_singlefloats)
- except NotImplementedError, e:
+ except NotImplementedError as e:
log.WARNING('%s, ignoring graph' % (e,))
log.WARNING(' %s' % (graph,))
return True
diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py
--- a/rpython/jit/codewriter/test/test_flatten.py
+++ b/rpython/jit/codewriter/test/test_flatten.py
@@ -371,7 +371,7 @@
def f(i):
try:
g(i)
- except FooError, e:
+ except FooError as e:
return e.num
except Exception:
return 3
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -1363,7 +1363,7 @@
tr = Transformer()
try:
tr.rewrite_operation(op)
- except Exception, e:
+ except Exception as e:
assert 'foobar' in str(e)
def test_likely_unlikely():
diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py
--- a/rpython/jit/codewriter/test/test_regalloc.py
+++ b/rpython/jit/codewriter/test/test_regalloc.py
@@ -272,7 +272,7 @@
kref2 = bar(kref)
try:
return g(n)
- except FooError, e:
+ except FooError as e:
if foo(e):
return kref
else:
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -172,7 +172,7 @@
# call the method bhimpl_xxx()
try:
result = unboundmethod(*args)
- except Exception, e:
+ except Exception as e:
if verbose and not we_are_translated():
print '-> %s!' % (e.__class__.__name__,)
if resulttype == 'i' or resulttype == 'r' or resulttype == 'f':
@@ -323,7 +323,7 @@
break
except jitexc.JitException:
raise # go through
- except Exception, e:
+ except Exception as e:
lle = get_llexception(self.cpu, e)
self.handle_exception_in_frame(lle)
@@ -1540,9 +1540,9 @@
# we now proceed to interpret the bytecode in this frame
self.run()
#
- except jitexc.JitException, e:
+ except jitexc.JitException as e:
raise # go through
- except Exception, e:
+ except Exception as e:
# if we get an exception, return it to the caller frame
current_exc = get_llexception(self.cpu, e)
if not self.nextblackholeinterp:
@@ -1673,7 +1673,7 @@
# We have reached a recursive portal level.
try:
blackholeinterp._handle_jitexception_in_portal(exc)
- except Exception, e:
+ except Exception as e:
# It raised a general exception (it should not be a JitException here).
lle = get_llexception(blackholeinterp.cpu, e)
else:
diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py
--- a/rpython/jit/metainterp/executor.py
+++ b/rpython/jit/metainterp/executor.py
@@ -51,28 +51,28 @@
if rettype == INT:
try:
result = cpu.bh_call_i(func, args_i, args_r, args_f, descr)
- except Exception, e:
+ except Exception as e:
metainterp.execute_raised(e)
result = 0
return result
if rettype == REF:
try:
result = cpu.bh_call_r(func, args_i, args_r, args_f, descr)
- except Exception, e:
+ except Exception as e:
metainterp.execute_raised(e)
result = NULL
return result
if rettype == FLOAT:
try:
result = cpu.bh_call_f(func, args_i, args_r, args_f, descr)
- except Exception, e:
+ except Exception as e:
metainterp.execute_raised(e)
result = longlong.ZEROF
return result
if rettype == VOID:
try:
cpu.bh_call_v(func, args_i, args_r, args_f, descr)
- except Exception, e:
+ except Exception as e:
metainterp.execute_raised(e)
return None
raise AssertionError("bad rettype")
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py b/rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py
@@ -39,7 +39,7 @@
def raises(self, e, fn, *args):
try:
fn(*args)
- except Exception, e:
+ except Exception as e:
return e
opt = allopts[optnum]
diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py
--- a/rpython/jit/metainterp/optimizeopt/virtualstate.py
+++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py
@@ -91,7 +91,7 @@
state.renum[self.position] = other.position
try:
self._generate_guards(other, op, runtime_op, state)
- except VirtualStatesCantMatch, e:
+ except VirtualStatesCantMatch as e:
state.bad[self] = state.bad[other] = None
if e.state is None:
e.state = state
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -2034,7 +2034,7 @@
else:
try:
self.compile_done_with_this_frame(resultbox)
- except SwitchToBlackhole, stb:
+ except SwitchToBlackhole as stb:
self.aborted_tracing(stb.reason)
sd = self.staticdata
result_type = self.jitdriver_sd.result_type
@@ -2067,7 +2067,7 @@
self.popframe()
try:
self.compile_exit_frame_with_exception(self.last_exc_box)
- except SwitchToBlackhole, stb:
+ except SwitchToBlackhole as stb:
self.aborted_tracing(stb.reason)
raise jitexc.ExitFrameWithExceptionRef(self.cpu, lltype.cast_opaque_ptr(llmemory.GCREF, excvalue))
@@ -2100,7 +2100,7 @@
guard_op = self.history.record(opnum, moreargs,
lltype.nullptr(llmemory.GCREF.TO))
else:
- guard_op = self.history.record(opnum, moreargs, None)
+ guard_op = self.history.record(opnum, moreargs, None)
self.capture_resumedata(resumepc)
# ^^^ records extra to history
self.staticdata.profiler.count_ops(opnum, Counters.GUARDS)
@@ -2254,7 +2254,7 @@
def execute_raised(self, exception, constant=False):
if isinstance(exception, jitexc.JitException):
- raise jitexc.JitException, exception # go through
+ raise exception # go through
llexception = jitexc.get_llexception(self.cpu, exception)
self.execute_ll_raised(llexception, constant)
@@ -2367,7 +2367,7 @@
self.seen_loop_header_for_jdindex = -1
try:
self.interpret()
- except SwitchToBlackhole, stb:
+ except SwitchToBlackhole as stb:
self.run_blackhole_interp_to_cancel_tracing(stb)
assert False, "should always raise"
@@ -2404,7 +2404,7 @@
if self.resumekey_original_loop_token is None: # very rare case
raise SwitchToBlackhole(Counters.ABORT_BRIDGE)
self.interpret()
- except SwitchToBlackhole, stb:
+ except SwitchToBlackhole as stb:
self.run_blackhole_interp_to_cancel_tracing(stb)
assert False, "should always raise"
@@ -3276,7 +3276,7 @@
print '\tpyjitpl: %s(%s)' % (name, ', '.join(map(repr, args))),
try:
resultbox = unboundmethod(self, *args)
- except Exception, e:
+ except Exception as e:
if self.debug:
print '-> %s!' % e.__class__.__name__
raise
diff --git a/rpython/jit/metainterp/test/test_blackhole.py b/rpython/jit/metainterp/test/test_blackhole.py
--- a/rpython/jit/metainterp/test/test_blackhole.py
+++ b/rpython/jit/metainterp/test/test_blackhole.py
@@ -205,7 +205,7 @@
myjitdriver.jit_merge_point(x=x, y=y)
try:
choices(x)
- except FooError, e:
+ except FooError as e:
if e.num == 0:
break
y += e.num
diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py
--- a/rpython/jit/metainterp/test/test_compile.py
+++ b/rpython/jit/metainterp/test/test_compile.py
@@ -164,7 +164,7 @@
fail_descr = cpu.get_latest_descr(deadframe)
try:
fail_descr.handle_fail(deadframe, FakeMetaInterpSD(), None)
- except jitexc.ExitFrameWithExceptionRef, e:
+ except jitexc.ExitFrameWithExceptionRef as e:
assert lltype.cast_opaque_ptr(lltype.Ptr(EXC), e.value) == llexc
else:
assert 0, "should have raised"
diff --git a/rpython/jit/metainterp/test/test_exception.py b/rpython/jit/metainterp/test/test_exception.py
--- a/rpython/jit/metainterp/test/test_exception.py
+++ b/rpython/jit/metainterp/test/test_exception.py
@@ -17,7 +17,7 @@
def f(n):
try:
return g(n)
- except MyError, e:
+ except MyError as e:
return e.n + 10
res = self.interp_operations(f, [9])
assert res == 8
@@ -141,7 +141,7 @@
try:
b(n)
return 0
- except MyError, e:
+ except MyError as e:
return e.n
def f(n):
return a(n)
@@ -161,7 +161,7 @@
myjitdriver.jit_merge_point(n=n)
try:
check(n, 0)
- except MyError, e:
+ except MyError as e:
n = check(e.n, 1)
return n
assert f(53) == -2
@@ -290,7 +290,7 @@
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
n = n - check(n)
- except MyError, e:
+ except MyError as e:
return e.n
assert f(53) == -2
res = self.meta_interp(f, [53], policy=StopAtXPolicy(check))
@@ -517,7 +517,7 @@
def f(n):
try:
portal(n)
- except SomeException, e:
+ except SomeException as e:
return 3
return 2
@@ -536,7 +536,7 @@
def main(n):
try:
f(n)
- except MyError, e:
+ except MyError as e:
return e.n
res = self.meta_interp(main, [41], repeat=7)
@@ -572,7 +572,7 @@
try:
f(n)
return 3
- except MyError, e:
+ except MyError as e:
return e.n
except ValueError:
return 8
@@ -590,7 +590,7 @@
def f(x):
try:
return g(x)
- except Exception, e:
+ except Exception as e:
if isinstance(e, OverflowError):
return -42
raise
diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py
--- a/rpython/jit/metainterp/test/test_recursive.py
+++ b/rpython/jit/metainterp/test/test_recursive.py
@@ -729,7 +729,7 @@
if codeno == 2:
try:
portal(1)
- except MyException, me:
+ except MyException as me:
i += me.x
i += 1
if codeno == 1:
@@ -1092,7 +1092,7 @@
if codeno < 10:
try:
portal(codeno + 5, k+1)
- except GotValue, e:
+ except GotValue as e:
i += e.result
codeno += 1
elif codeno == 10:
@@ -1106,7 +1106,7 @@
def main(codeno, k):
try:
portal(codeno, k)
- except GotValue, e:
+ except GotValue as e:
return e.result
assert main(0, 1) == 2095
diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py
--- a/rpython/jit/metainterp/test/test_virtualizable.py
+++ b/rpython/jit/metainterp/test/test_virtualizable.py
@@ -665,7 +665,7 @@
jitdriver.jit_merge_point(frame=frame)
try:
g()
- except FooError, e:
+ except FooError as e:
frame.x -= e.value
frame.y += 1
return frame.x
diff --git a/rpython/jit/metainterp/test/test_warmspot.py b/rpython/jit/metainterp/test/test_warmspot.py
--- a/rpython/jit/metainterp/test/test_warmspot.py
+++ b/rpython/jit/metainterp/test/test_warmspot.py
@@ -45,7 +45,7 @@
def main(a):
try:
interpreter_loop(a)
- except Exit, e:
+ except Exit as e:
return e.result
res = self.meta_interp(main, [1])
@@ -674,7 +674,7 @@
assert jd._assembler_call_helper(FakeDeadFrame(1), 0) == 10
try:
jd._assembler_call_helper(FakeDeadFrame(3), 0)
- except LLException, lle:
+ except LLException as lle:
assert lle[0] == self.exc_vtable
else:
py.test.fail("DID NOT RAISE")
diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py
--- a/rpython/jit/metainterp/warmspot.py
+++ b/rpython/jit/metainterp/warmspot.py
@@ -82,7 +82,7 @@
backendopt=False, trace_limit=sys.maxint, inline=False,
loop_longevity=0, retrace_limit=5, function_threshold=4,
disable_unrolling=sys.maxint,
- enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15,
+ enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15,
max_unroll_recursion=7, vec=1, vec_all=0, vec_cost=0,
vec_length=60, vec_ratio=2, vec_guard_ratio=3, **kwds):
from rpython.config.config import ConfigError
@@ -489,7 +489,7 @@
if opencoder_model == 'big':
self.metainterp_sd.opencoder_model = BigModel
else:
- self.metainterp_sd.opencoder_model = Model
+ self.metainterp_sd.opencoder_model = Model
self.stats.metainterp_sd = self.metainterp_sd
def make_virtualizable_infos(self):
@@ -543,7 +543,7 @@
raise # go through
except StackOverflow:
raise # go through
- except Exception, e:
+ except Exception as e:
if not we_are_translated():
print "~~~ Crash in JIT!"
print '~~~ %s: %s' % (e.__class__, e)
@@ -908,7 +908,7 @@
# want to interrupt the whole interpreter loop.
return support.maybe_on_top_of_llinterp(rtyper,
portal_ptr)(*args)
- except jitexc.ContinueRunningNormally, e:
+ except jitexc.ContinueRunningNormally as e:
args = ()
for ARGTYPE, attrname, count in portalfunc_ARGS:
x = getattr(e, attrname)[count]
@@ -919,28 +919,28 @@
except jitexc.DoneWithThisFrameVoid:
assert result_kind == 'void'
return
- except jitexc.DoneWithThisFrameInt, e:
+ except jitexc.DoneWithThisFrameInt as e:
assert result_kind == 'int'
return specialize_value(RESULT, e.result)
- except jitexc.DoneWithThisFrameRef, e:
+ except jitexc.DoneWithThisFrameRef as e:
assert result_kind == 'ref'
return specialize_value(RESULT, e.result)
- except jitexc.DoneWithThisFrameFloat, e:
+ except jitexc.DoneWithThisFrameFloat as e:
assert result_kind == 'float'
return specialize_value(RESULT, e.result)
- except jitexc.ExitFrameWithExceptionRef, e:
+ except jitexc.ExitFrameWithExceptionRef as e:
value = ts.cast_to_baseclass(e.value)
if not we_are_translated():
raise LLException(ts.get_typeptr(value), value)
else:
value = cast_base_ptr_to_instance(Exception, value)
- raise Exception, value
+ raise value
def handle_jitexception(e):
# XXX the bulk of this function is mostly a copy-paste from above
try:
raise e
- except jitexc.ContinueRunningNormally, e:
+ except jitexc.ContinueRunningNormally as e:
args = ()
for ARGTYPE, attrname, count in portalfunc_ARGS:
x = getattr(e, attrname)[count]
@@ -953,22 +953,22 @@
except jitexc.DoneWithThisFrameVoid:
assert result_kind == 'void'
return
- except jitexc.DoneWithThisFrameInt, e:
+ except jitexc.DoneWithThisFrameInt as e:
assert result_kind == 'int'
return e.result
- except jitexc.DoneWithThisFrameRef, e:
+ except jitexc.DoneWithThisFrameRef as e:
assert result_kind == 'ref'
return e.result
- except jitexc.DoneWithThisFrameFloat, e:
+ except jitexc.DoneWithThisFrameFloat as e:
assert result_kind == 'float'
return e.result
- except jitexc.ExitFrameWithExceptionRef, e:
+ except jitexc.ExitFrameWithExceptionRef as e:
value = ts.cast_to_baseclass(e.value)
if not we_are_translated():
raise LLException(ts.get_typeptr(value), value)
else:
value = cast_base_ptr_to_instance(Exception, value)
- raise Exception, value
+ raise value
jd._ll_portal_runner = ll_portal_runner # for debugging
jd.portal_runner_ptr = self.helper_func(jd._PTR_PORTAL_FUNCTYPE,
@@ -986,7 +986,7 @@
fail_descr = self.cpu.get_latest_descr(deadframe)
try:
fail_descr.handle_fail(deadframe, self.metainterp_sd, jd)
- except jitexc.JitException, e:
+ except jitexc.JitException as e:
return handle_jitexception(e)
else:
assert 0, "should have raised"
diff --git a/rpython/jit/tl/test/test_pypyjit.py b/rpython/jit/tl/test/test_pypyjit.py
--- a/rpython/jit/tl/test/test_pypyjit.py
+++ b/rpython/jit/tl/test/test_pypyjit.py
@@ -21,7 +21,7 @@
def check_crasher(func_name):
try:
JIT_EXECUTABLE.sysexec(CRASH_FILE, func_name)
- except py.process.cmdexec.Error, e:
+ except py.process.cmdexec.Error as e:
print "stderr"
print "------"
print e.err
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -639,13 +639,14 @@
# Build the object.
llarena.arena_reserve(result, totalsize)
obj = result + size_gc_header
- if is_finalizer_light:
- self.young_objects_with_light_finalizers.append(obj)
self.init_gc_object(result, typeid, flags=0)
- #
- # If it is a weakref, record it (check constant-folded).
- if contains_weakptr:
- self.young_objects_with_weakrefs.append(obj)
+ #
+ # If it is a weakref or has a lightweight finalizer, record it
+ # (checks constant-folded).
+ if is_finalizer_light:
+ self.young_objects_with_light_finalizers.append(obj)
+ if contains_weakptr:
+ self.young_objects_with_weakrefs.append(obj)
#
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
@@ -2899,7 +2900,7 @@
# force the corresponding object to be alive
intobj = self._pyobj(pyobject).ob_pypy_link
singleaddr.address[0] = llmemory.cast_int_to_adr(intobj)
- self._trace_drag_out(singleaddr, llmemory.NULL)
+ self._trace_drag_out1(singleaddr)
def rrc_minor_collection_free(self):
ll_assert(self.rrc_p_dict_nurs.length() == 0, "p_dict_nurs not empty 1")
diff --git a/rpython/memory/gctransform/support.py b/rpython/memory/gctransform/support.py
--- a/rpython/memory/gctransform/support.py
+++ b/rpython/memory/gctransform/support.py
@@ -80,7 +80,7 @@
def ll_call_destructor(destrptr, destr_v, typename):
try:
destrptr(destr_v)
- except Exception, e:
+ except Exception as e:
try:
write(2, "a destructor of type ")
write(2, typename)
diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py
--- a/rpython/memory/gctransform/transform.py
+++ b/rpython/memory/gctransform/transform.py
@@ -129,7 +129,7 @@
raise_analyzer,
cleanup=False)
must_constfold = True
- except inline.CannotInline, e:
+ except inline.CannotInline as e:
print 'CANNOT INLINE:', e
print '\t%s into %s' % (inline_graph, graph)
cleanup_graph(graph)
diff --git a/rpython/rlib/parsing/main.py b/rpython/rlib/parsing/main.py
--- a/rpython/rlib/parsing/main.py
+++ b/rpython/rlib/parsing/main.py
@@ -7,7 +7,7 @@
try:
t = py.path.local(filename).read(mode='U')
regexs, rules, ToAST = parse_ebnf(t)
- except ParseError, e:
+ except ParseError as e:
print e.nice_error_message(filename=filename, source=t)
raise
return make_parse_function(regexs, rules, eof=True)
diff --git a/rpython/rlib/parsing/makepackrat.py b/rpython/rlib/parsing/makepackrat.py
--- a/rpython/rlib/parsing/makepackrat.py
+++ b/rpython/rlib/parsing/makepackrat.py
@@ -632,7 +632,7 @@
p = PyPackratSyntaxParser(source)
try:
t = p.file()
- except BacktrackException, exc:
+ except BacktrackException as exc:
print exc.error.nice_error_message("", source)
lineno, _ = exc.error.get_line_column(source)
errorline = source.split("\n")[lineno]
diff --git a/rpython/rlib/parsing/pypackrat.py b/rpython/rlib/parsing/pypackrat.py
--- a/rpython/rlib/parsing/pypackrat.py
+++ b/rpython/rlib/parsing/pypackrat.py
@@ -29,7 +29,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -61,7 +61,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -93,7 +93,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -125,7 +125,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -167,14 +167,14 @@
_result = _call_status.result
_error = _call_status.error
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
try:
_result = self._regex299149370()
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
raise BacktrackException(_error)
@@ -197,7 +197,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -231,7 +231,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -265,7 +265,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -299,7 +299,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -360,7 +360,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
@@ -403,7 +403,7 @@
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -433,7 +433,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -480,7 +480,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -504,7 +504,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -551,7 +551,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -569,7 +569,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -586,7 +586,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all4.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
break
@@ -600,7 +600,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
@@ -623,7 +623,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -670,7 +670,7 @@
_result = _call_status.result
_error = _call_status.error
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -691,7 +691,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
@@ -705,14 +705,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all8.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
break
_result = _all8
_result = _before_discard5
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
@@ -730,7 +730,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
@@ -744,21 +744,21 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all12.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice13
break
_result = _all12
_result = (Nonterminal('productionargs', args + [arg]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice14 = self._pos
try:
_result = (Nonterminal('productionargs', []))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice14
raise BacktrackException(_error)
@@ -781,7 +781,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -833,7 +833,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
@@ -856,14 +856,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all7.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice8
break
_result = _all7
_result = _before_discard6
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
break
@@ -875,7 +875,7 @@
last = _result
_result = (Nonterminal('or', l + [last]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice9 = self._pos
@@ -884,7 +884,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
raise BacktrackException(_error)
@@ -909,7 +909,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -976,7 +976,7 @@
_error = self._combine_errors(_error, _call_status.error)
_result = _before_discard4
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -984,7 +984,7 @@
cmds = _result
_result = (Nonterminal('commands', [cmd] + cmds))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice5 = self._pos
@@ -993,7 +993,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
raise BacktrackException(_error)
@@ -1018,7 +1018,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1073,7 +1073,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1115,7 +1115,7 @@
_result = _call_status.result
_error = _call_status.error
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
@@ -1124,7 +1124,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
_choice2 = self._pos
@@ -1133,7 +1133,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
_choice3 = self._pos
@@ -1142,7 +1142,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
_choice4 = self._pos
@@ -1151,7 +1151,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
_choice5 = self._pos
@@ -1160,7 +1160,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
raise BacktrackException(_error)
@@ -1185,7 +1185,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1229,7 +1229,7 @@
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -1246,7 +1246,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -1269,7 +1269,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1323,7 +1323,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -1337,7 +1337,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
@@ -1354,14 +1354,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all5.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice6
break
_result = _all5
_result = (Nonterminal('if', [cmd, condition]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice7 = self._pos
@@ -1375,7 +1375,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all8.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
break
@@ -1392,14 +1392,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
_result = _all10
_result = (Nonterminal('if', [condition]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
raise BacktrackException(_error)
@@ -1412,7 +1412,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all12.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice13
break
@@ -1429,7 +1429,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all14.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice15
break
@@ -1453,7 +1453,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1497,7 +1497,7 @@
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -1514,7 +1514,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -1528,7 +1528,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all4.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
break
@@ -1545,7 +1545,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
@@ -1572,7 +1572,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1619,7 +1619,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -1643,7 +1643,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1690,7 +1690,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
@@ -1704,7 +1704,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
@@ -1731,7 +1731,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1781,7 +1781,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -1795,14 +1795,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
_result = _all3
_result = (Nonterminal('maybe', [what]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice5 = self._pos
@@ -1819,7 +1819,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
@@ -1829,14 +1829,14 @@
try:
_result = self.__chars__('*')
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice8
_choice9 = self._pos
try:
_result = self.__chars__('+')
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
raise BacktrackException(_error)
@@ -1851,14 +1851,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
_result = _all10
_result = (Nonterminal('repetition', [repetition, what]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
raise BacktrackException(_error)
@@ -1874,7 +1874,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all12.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice13
break
@@ -1884,14 +1884,14 @@
try:
_result = self.__chars__('*')
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice14
_choice15 = self._pos
try:
_result = self.__chars__('+')
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice15
raise BacktrackException(_error)
@@ -1906,7 +1906,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all16.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice17
break
@@ -1930,7 +1930,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -1977,7 +1977,7 @@
_result = _call_status.result
_error = _call_status.error
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -1994,14 +1994,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
_result = _all3
_result = (Nonterminal('negation', [what]))
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice5 = self._pos
@@ -2010,7 +2010,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
raise BacktrackException(_error)
@@ -2035,7 +2035,7 @@
_status.result = _result
_status.error = _error
return _status
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
@@ -2082,7 +2082,7 @@
_result = _call_status.result
_error = _call_status.error
_all1.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
@@ -2099,7 +2099,7 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
@@ -2113,14 +2113,14 @@
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all5.append(_result)
- except BacktrackException, _exc:
+ except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice6
break
_result = _all5
_result = (Nonterminal('exclusive', [what]))
From pypy.commits at gmail.com Mon May 2 20:23:45 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 17:23:45 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge oefmt (b974474) oefmt
pypy/{objspace, tool}/
Message-ID: <5727ef91.8d1f1c0a.a4361.65cc@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84155:86092fa1069f
Date: 2016-05-02 17:22 -0700
http://bitbucket.org/pypy/pypy/changeset/86092fa1069f/
Log: merge oefmt (b974474) oefmt pypy/{objspace,tool}/
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -247,8 +247,8 @@
if space.is_w(w_restype, space.w_int):
return space.int_w(w_res) != 0
else:
- msg = "__nonzero__ should return bool or integer"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "__nonzero__ should return bool or integer")
def nonzero(space, w_obj):
if space.is_true(w_obj):
@@ -282,8 +282,7 @@
w_iter = space.get_and_call_function(w_descr, w_obj)
w_next = space.lookup(w_iter, 'next')
if w_next is None:
- raise OperationError(space.w_TypeError,
- space.wrap("iter() returned non-iterator"))
+ raise oefmt(space.w_TypeError, "iter() returned non-iterator")
return w_iter
def next(space, w_obj):
@@ -382,8 +381,7 @@
if _check_notimplemented(space, w_res):
return w_res
- raise OperationError(space.w_TypeError,
- space.wrap("operands do not support **"))
+ raise oefmt(space.w_TypeError, "operands do not support **")
def inplace_pow(space, w_lhs, w_rhs):
w_impl = space.lookup(w_lhs, '__ipow__')
@@ -439,8 +437,8 @@
bigint = space.bigint_w(w_result)
return space.wrap(bigint.hash())
else:
- raise OperationError(space.w_TypeError,
- space.wrap("__hash__() should return an int or long"))
+ raise oefmt(space.w_TypeError,
+ "__hash__() should return an int or long")
def userdel(space, w_obj):
w_del = space.lookup(w_obj, '__del__')
@@ -469,8 +467,7 @@
def coerce(space, w_obj1, w_obj2):
w_res = space.try_coerce(w_obj1, w_obj2)
if w_res is None:
- raise OperationError(space.w_TypeError,
- space.wrap("coercion failed"))
+ raise oefmt(space.w_TypeError, "coercion failed")
return w_res
def try_coerce(space, w_obj1, w_obj2):
@@ -494,13 +491,13 @@
return None
if (not space.isinstance_w(w_res, space.w_tuple) or
space.len_w(w_res) != 2):
- raise OperationError(space.w_TypeError,
- space.wrap("coercion should return None or 2-tuple"))
+ raise oefmt(space.w_TypeError,
+ "coercion should return None or 2-tuple")
w_res = space.newtuple([space.getitem(w_res, space.wrap(1)), space.getitem(w_res, space.wrap(0))])
elif (not space.isinstance_w(w_res, space.w_tuple) or
space.len_w(w_res) != 2):
- raise OperationError(space.w_TypeError,
- space.wrap("coercion should return None or 2-tuple"))
+ raise oefmt(space.w_TypeError,
+ "coercion should return None or 2-tuple")
return w_res
def issubtype(space, w_sub, w_type):
@@ -517,8 +514,7 @@
def issubtype_allow_override(space, w_sub, w_type):
w_check = space.lookup(w_type, "__subclasscheck__")
if w_check is None:
- raise OperationError(space.w_TypeError,
- space.wrap("issubclass not supported here"))
+ raise oefmt(space.w_TypeError, "issubclass not supported here")
return space.get_and_call_function(w_check, w_type, w_sub)
def isinstance_allow_override(space, w_inst, w_type):
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -446,8 +446,8 @@
return StringBuffer(self._value)
def writebuf_w(self, space):
- raise OperationError(space.w_TypeError, space.wrap(
- "Cannot use string as modifiable buffer"))
+ raise oefmt(space.w_TypeError,
+ "Cannot use string as modifiable buffer")
charbuf_w = str_w
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -41,7 +41,8 @@
if space.is_w(space.type(w_key), space.w_str):
self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
else:
- raise OperationError(space.w_TypeError, space.wrap("cannot add non-string keys to dict of a type"))
+ raise oefmt(space.w_TypeError,
+ "cannot add non-string keys to dict of a type")
def setitem_str(self, w_dict, key, w_value):
w_type = self.unerase(w_dict.dstorage)
diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py
--- a/pypy/objspace/std/formatting.py
+++ b/pypy/objspace/std/formatting.py
@@ -28,27 +28,24 @@
try:
w_result = self.values_w[self.values_pos]
except IndexError:
- space = self.space
- raise OperationError(space.w_TypeError, space.wrap(
- 'not enough arguments for format string'))
+ raise oefmt(self.space.w_TypeError,
+ "not enough arguments for format string")
else:
self.values_pos += 1
return w_result
def checkconsumed(self):
if self.values_pos < len(self.values_w) and self.w_valuedict is None:
- space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap('not all arguments converted '
- 'during string formatting'))
+ raise oefmt(self.space.w_TypeError,
+ "not all arguments converted during string formatting")
def std_wp_int(self, r, prefix='', keep_zero=False):
# use self.prec to add some '0' on the left of the number
if self.prec >= 0:
if self.prec > 1000:
- raise OperationError(
- self.space.w_OverflowError, self.space.wrap(
- 'formatted integer is too long (precision too large?)'))
+ raise oefmt(self.space.w_OverflowError,
+ "formatted integer is too long (precision too "
+ "large?)")
sign = r[0] == '-'
padding = self.prec - (len(r)-int(sign))
if padding > 0:
@@ -170,9 +167,7 @@
try:
return self.fmt[self.fmtpos]
except IndexError:
- space = self.space
- raise OperationError(space.w_ValueError,
- space.wrap("incomplete format"))
+ raise oefmt(self.space.w_ValueError, "incomplete format")
# Only shows up if we've already started inlining format(), so just
# unconditionally unroll this.
@@ -188,8 +183,7 @@
c = fmt[i]
except IndexError:
space = self.space
- raise OperationError(space.w_ValueError,
- space.wrap("incomplete format key"))
+ raise oefmt(space.w_ValueError, "incomplete format key")
if c == ')':
pcount -= 1
if pcount == 0:
@@ -204,8 +198,7 @@
# return the value corresponding to a key in the input dict
space = self.space
if self.w_valuedict is None:
- raise OperationError(space.w_TypeError,
- space.wrap("format requires a mapping"))
+ raise oefmt(space.w_TypeError, "format requires a mapping")
w_key = space.wrap(key)
return space.getitem(self.w_valuedict, w_key)
@@ -347,9 +340,9 @@
s = space.str_w(w_s)
else:
s = c
- msg = "unsupported format character '%s' (0x%x) at index %d" % (
- s, ord(c), self.fmtpos - 1)
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "unsupported format character '%s' (%s) at index %d",
+ s, hex(ord(c)), self.fmtpos - 1)
def std_wp(self, r):
length = len(r)
@@ -434,9 +427,8 @@
space = self.space
w_impl = space.lookup(w_value, '__str__')
if w_impl is None:
- raise OperationError(space.w_TypeError,
- space.wrap("operand does not support "
- "unary str"))
+ raise oefmt(space.w_TypeError,
+ "operand does not support unary str")
w_result = space.get_and_call_function(w_impl, w_value)
if space.isinstance_w(w_result,
space.w_unicode):
@@ -469,16 +461,14 @@
if space.isinstance_w(w_value, space.w_str):
s = space.str_w(w_value)
if len(s) != 1:
- raise OperationError(space.w_TypeError,
- space.wrap("%c requires int or char"))
+ raise oefmt(space.w_TypeError, "%c requires int or char")
self.std_wp(s)
elif space.isinstance_w(w_value, space.w_unicode):
if not do_unicode:
raise NeedUnicodeFormattingError
ustr = space.unicode_w(w_value)
if len(ustr) != 1:
- raise OperationError(space.w_TypeError,
- space.wrap("%c requires int or unichar"))
+ raise oefmt(space.w_TypeError, "%c requires int or unichar")
self.std_wp(ustr)
else:
n = space.int_w(w_value)
@@ -486,15 +476,15 @@
try:
c = unichr(n)
except ValueError:
- raise OperationError(space.w_OverflowError,
- space.wrap("unicode character code out of range"))
+ raise oefmt(space.w_OverflowError,
+ "unicode character code out of range")
self.std_wp(c)
else:
try:
s = chr(n)
- except ValueError: # chr(out-of-range)
- raise OperationError(space.w_OverflowError,
- space.wrap("character code not in range(256)"))
+ except ValueError:
+ raise oefmt(space.w_OverflowError,
+ "character code not in range(256)")
self.std_wp(s)
return StringFormatter
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -566,8 +566,7 @@
index = space.getindex_w(w_index, space.w_IndexError, "list index")
return self.getitem(index)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
def descr_getslice(self, space, w_start, w_stop):
length = self.length()
@@ -594,8 +593,7 @@
try:
self.setitem(idx, w_any)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
def descr_setslice(self, space, w_start, w_stop, w_iterable):
length = self.length()
@@ -621,8 +619,7 @@
try:
self.pop(idx)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
def descr_delslice(self, space, w_start, w_stop):
length = self.length()
@@ -662,8 +659,7 @@
index (default last)'''
length = self.length()
if length == 0:
- raise OperationError(space.w_IndexError,
- space.wrap("pop from empty list"))
+ raise oefmt(space.w_IndexError, "pop from empty list")
# clearly differentiate between list.pop() and list.pop(index)
if index == -1:
return self.pop_end() # cannot raise because list is not empty
@@ -672,8 +668,7 @@
try:
return self.pop(index)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("pop index out of range"))
+ raise oefmt(space.w_IndexError, "pop index out of range")
def descr_remove(self, space, w_value):
'L.remove(value) -- remove first occurrence of value'
@@ -769,8 +764,7 @@
self.__init__(space, sorter.list)
if mucked:
- raise OperationError(space.w_ValueError,
- space.wrap("list modified during sort"))
+ raise oefmt(space.w_ValueError, "list modified during sort")
find_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'list.find')
@@ -1489,14 +1483,15 @@
def setslice(self, w_list, start, step, slicelength, w_other):
assert slicelength >= 0
+ space = self.space
- if self is self.space.fromcache(ObjectListStrategy):
+ if self is space.fromcache(ObjectListStrategy):
w_other = w_other._temporarily_as_objects()
elif not self.list_is_correct_type(w_other) and w_other.length() != 0:
w_list.switch_to_object_strategy()
w_other_as_object = w_other._temporarily_as_objects()
assert (w_other_as_object.strategy is
- self.space.fromcache(ObjectListStrategy))
+ space.fromcache(ObjectListStrategy))
w_list.setslice(start, step, slicelength, w_other_as_object)
return
@@ -1522,7 +1517,7 @@
assert start >= 0
del items[start:start + delta]
elif len2 != slicelength: # No resize for extended slices
- raise oefmt(self.space.w_ValueError,
+ raise oefmt(space.w_ValueError,
"attempt to assign sequence of size %d to extended "
"slice of size %d", len2, slicelength)
@@ -2120,8 +2115,8 @@
result = space.int_w(w_result)
except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(space.w_TypeError,
- space.wrap("comparison function must return int"))
+ raise oefmt(space.w_TypeError,
+ "comparison function must return int")
raise
return result < 0
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -563,12 +563,11 @@
@objectmodel.dont_inline
def _obj_setdict(self, space, w_dict):
- from pypy.interpreter.error import OperationError
+ from pypy.interpreter.error import oefmt
terminator = self._get_mapdict_map().terminator
assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator)
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting dictionary to a non-dict"))
+ raise oefmt(space.w_TypeError, "setting dictionary to a non-dict")
assert isinstance(w_dict, W_DictMultiObject)
w_olddict = self.getdict(space)
assert isinstance(w_olddict, W_DictMultiObject)
diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py
--- a/pypy/objspace/std/newformat.py
+++ b/pypy/objspace/std/newformat.py
@@ -63,8 +63,7 @@
else:
out = rstring.StringBuilder()
if not level:
- raise OperationError(space.w_ValueError,
- space.wrap("Recursion depth exceeded"))
+ raise oefmt(space.w_ValueError, "Recursion depth exceeded")
level -= 1
s = self.template
return self._do_build_string(start, end, level, out, s)
@@ -82,14 +81,12 @@
markup_follows = True
if c == "}":
if at_end or s[i] != "}":
- raise OperationError(space.w_ValueError,
- space.wrap("Single '}'"))
+ raise oefmt(space.w_ValueError, "Single '}'")
i += 1
markup_follows = False
if c == "{":
if at_end:
- raise OperationError(space.w_ValueError,
- space.wrap("Single '{'"))
+ raise oefmt(space.w_ValueError, "Single '{'")
if s[i] == "{":
i += 1
markup_follows = False
@@ -121,8 +118,7 @@
break
i += 1
if nested:
- raise OperationError(space.w_ValueError,
- space.wrap("Unmatched '{'"))
+ raise oefmt(space.w_ValueError, "Unmatched '{'")
rendered = self._render_field(field_start, i, recursive, level)
out.append(rendered)
i += 1
@@ -144,16 +140,15 @@
if c == "!":
i += 1
if i == end:
- w_msg = self.space.wrap("expected conversion")
- raise OperationError(self.space.w_ValueError, w_msg)
+ raise oefmt(self.space.w_ValueError,
+ "expected conversion")
conversion = s[i]
i += 1
if i < end:
if s[i] != ':':
- w_msg = self.space.wrap("expected ':' after"
- " format specifier")
- raise OperationError(self.space.w_ValueError,
- w_msg)
+ raise oefmt(self.space.w_ValueError,
+ "expected ':' after format "
+ "specifier")
i += 1
else:
conversion = None
@@ -189,13 +184,12 @@
if use_numeric:
if self.auto_numbering_state == ANS_MANUAL:
if empty:
- msg = "switching from manual to automatic numbering"
- raise OperationError(space.w_ValueError,
- space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "switching from manual to automatic "
+ "numbering")
elif not empty:
- msg = "switching from automatic to manual numbering"
- raise OperationError(space.w_ValueError,
- space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "switching from automatic to manual numbering")
if empty:
index = self.auto_numbering
self.auto_numbering += 1
@@ -217,8 +211,7 @@
try:
w_arg = self.args[index]
except IndexError:
- w_msg = space.wrap("index out of range")
- raise OperationError(space.w_IndexError, w_msg)
+ raise oefmt(space.w_IndexError, "out of range")
return self._resolve_lookups(w_arg, name, i, end)
@jit.unroll_safe
@@ -237,8 +230,8 @@
break
i += 1
if start == i:
- w_msg = space.wrap("Empty attribute in format string")
- raise OperationError(space.w_ValueError, w_msg)
+ raise oefmt(space.w_ValueError,
+ "Empty attribute in format string")
w_attr = space.wrap(name[start:i])
if w_obj is not None:
w_obj = space.getattr(w_obj, w_attr)
@@ -256,8 +249,7 @@
break
i += 1
if not got_bracket:
- raise OperationError(space.w_ValueError,
- space.wrap("Missing ']'"))
+ raise oefmt(space.w_ValueError, "Missing ']'")
index, reached = _parse_int(self.space, name, start, i)
if index != -1 and reached == i:
w_item = space.wrap(index)
@@ -270,8 +262,8 @@
self.parser_list_w.append(space.newtuple([
space.w_False, w_item]))
else:
- msg = "Only '[' and '.' may follow ']'"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Only '[' and '.' may follow ']'")
return w_obj
def formatter_field_name_split(self):
@@ -311,8 +303,7 @@
return space.call_function(space.w_unicode, w_obj)
return space.str(w_obj)
else:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("invalid conversion"))
+ raise oefmt(space.w_ValueError, "invalid conversion")
def _render_field(self, start, end, recursive, level):
name, conversion, spec_start = self._parse_field(start, end)
@@ -471,19 +462,17 @@
i += 1
self._precision, i = _parse_int(self.space, spec, i, length)
if self._precision == -1:
- raise OperationError(space.w_ValueError,
- space.wrap("no precision given"))
+ raise oefmt(space.w_ValueError, "no precision given")
if length - i > 1:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid format spec"))
+ raise oefmt(space.w_ValueError, "invalid format spec")
if length - i == 1:
presentation_type = spec[i]
if self.is_unicode:
try:
the_type = spec[i].encode("ascii")[0]
except UnicodeEncodeError:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid presentation type"))
+ raise oefmt(space.w_ValueError,
+ "invalid presentation type")
else:
the_type = presentation_type
i += 1
@@ -502,8 +491,7 @@
# ok
pass
else:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid type with ','"))
+ raise oefmt(space.w_ValueError, "invalid type with ','")
return False
def _calc_padding(self, string, length):
@@ -546,9 +534,8 @@
return rstring.StringBuilder()
def _unknown_presentation(self, tp):
- msg = "unknown presentation for %s: '%s'"
- w_msg = self.space.wrap(msg % (tp, self._type))
- raise OperationError(self.space.w_ValueError, w_msg)
+ raise oefmt(self.space.w_ValueError,
+ "unknown presentation for %s: '%s'", tp, self._type)
def format_string(self, string):
space = self.space
@@ -557,14 +544,16 @@
if self._type != "s":
self._unknown_presentation("string")
if self._sign != "\0":
- msg = "Sign not allowed in string format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Sign not allowed in string format specifier")
if self._alternate:
- msg = "Alternate form (#) not allowed in string format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Alternate form (#) not allowed in string format "
+ "specifier")
if self._align == "=":
- msg = "'=' alignment not allowed in string format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "'=' alignment not allowed in string format "
+ "specifier")
length = len(string)
precision = self._precision
if precision != -1 and length >= precision:
@@ -762,14 +751,14 @@
def _format_int_or_long(self, w_num, kind):
space = self.space
if self._precision != -1:
- msg = "precision not allowed in integer type"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "precision not allowed in integer type")
sign_char = "\0"
tp = self._type
if tp == "c":
if self._sign != "\0":
- msg = "sign not allowed with 'c' presentation type"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "sign not allowed with 'c' presentation type")
value = space.int_w(w_num)
if self.is_unicode:
result = runicode.UNICHR(value)
@@ -920,8 +909,8 @@
flags = 0
default_precision = 6
if self._alternate:
- msg = "Alternate form (#) not allowed in float formats"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Alternate form (#) not allowed in float formats")
tp = self._type
self._get_locale(tp)
if tp == "\0":
@@ -989,18 +978,19 @@
default_precision = 6
if self._align == "=":
# '=' alignment is invalid
- msg = ("'=' alignment flag is not allowed in"
- " complex format specifier")
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "'=' alignment flag is not allowed in complex "
+ "format specifier")
if self._fill_char == "0":
- #zero padding is invalid
- msg = "Zero padding is not allowed in complex format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ # zero padding is invalid
+ raise oefmt(space.w_ValueError,
+ "Zero padding is not allowed in complex format "
+ "specifier")
if self._alternate:
- #alternate is invalid
- msg = "Alternate form (#) not allowed in complex format specifier"
- raise OperationError(space.w_ValueError,
- space.wrap(msg))
+ # alternate is invalid
+ raise oefmt(space.w_ValueError,
+ "Alternate form (#) not allowed in complex format "
+ "specifier")
skip_re = 0
add_parens = 0
if tp == "\0":
diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py
--- a/pypy/objspace/std/objectobject.py
+++ b/pypy/objspace/std/objectobject.py
@@ -198,8 +198,7 @@
elif space.isinstance_w(w_format_spec, space.w_str):
w_as_str = space.str(w_obj)
else:
- msg = "format_spec must be a string"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "format_spec must be a string")
if space.len_w(w_format_spec) > 0:
msg = "object.__format__ with a non-empty format string is deprecated"
space.warn(space.wrap(msg), space.w_PendingDeprecationWarning)
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -374,8 +374,8 @@
# one is not
def _wrap_expected_length(self, expected, got):
- return OperationError(self.w_ValueError,
- self.wrap("expected length %d, got %d" % (expected, got)))
+ return oefmt(self.w_ValueError,
+ "expected length %d, got %d", expected, got)
def unpackiterable(self, w_obj, expected_length=-1):
if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj):
@@ -506,8 +506,7 @@
w_tup = self.call_function(w_indices, w_length)
l_w = self.unpackiterable(w_tup)
if not len(l_w) == 3:
- raise OperationError(self.w_ValueError,
- self.wrap("Expected tuple of length 3"))
+ raise oefmt(self.w_ValueError, "Expected tuple of length 3")
return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2])
_DescrOperation_is_true = is_true
@@ -613,13 +612,12 @@
def _type_issubtype(self, w_sub, w_type):
if isinstance(w_sub, W_TypeObject) and isinstance(w_type, W_TypeObject):
return self.wrap(w_sub.issubtype(w_type))
- raise OperationError(self.w_TypeError, self.wrap("need type objects"))
+ raise oefmt(self.w_TypeError, "need type objects")
@specialize.arg_or_var(2)
def _type_isinstance(self, w_inst, w_type):
if not isinstance(w_type, W_TypeObject):
- raise OperationError(self.w_TypeError,
- self.wrap("need type object"))
+ raise oefmt(self.w_TypeError, "need type object")
if is_annotation_constant(w_type):
cls = self._get_interplevel_cls(w_type)
if cls is not None:
diff --git a/pypy/objspace/std/proxyobject.py b/pypy/objspace/std/proxyobject.py
--- a/pypy/objspace/std/proxyobject.py
+++ b/pypy/objspace/std/proxyobject.py
@@ -1,7 +1,7 @@
""" transparent list implementation
"""
from pypy.interpreter import baseobjspace
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
def transparent_class(name, BaseCls):
@@ -20,8 +20,9 @@
return self.w_type
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("You cannot override __class__ for transparent proxies"))
+ raise oefmt(space.w_TypeError,
+ "You cannot override __class__ for transparent "
+ "proxies")
def getdictvalue(self, space, attr):
try:
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py
--- a/pypy/objspace/std/setobject.py
+++ b/pypy/objspace/std/setobject.py
@@ -1,6 +1,6 @@
from pypy.interpreter import gateway
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.signature import Signature
from pypy.interpreter.typedef import TypeDef
from pypy.objspace.std.bytesobject import W_BytesObject
@@ -173,8 +173,7 @@
def descr_cmp(self, space, w_other):
if space.is_w(space.type(self), space.type(w_other)):
# hack hack until we get the expected result
- raise OperationError(space.w_TypeError,
- space.wrap('cannot compare sets using cmp()'))
+ raise oefmt(space.w_TypeError, "cannot compare sets using cmp()")
else:
return space.w_NotImplemented
@@ -840,8 +839,7 @@
return EmptyIteratorImplementation(self.space, self, w_set)
def popitem(self, w_set):
- raise OperationError(self.space.w_KeyError,
- self.space.wrap('pop from an empty set'))
+ raise oefmt(self.space.w_KeyError, "pop from an empty set")
class AbstractUnwrappedSetStrategy(object):
@@ -1198,8 +1196,7 @@
result = storage.popitem()
except KeyError:
# strategy may still be the same even if dict is empty
- raise OperationError(self.space.w_KeyError,
- self.space.wrap('pop from an empty set'))
+ raise oefmt(self.space.w_KeyError, "pop from an empty set")
return self.wrap(result[0])
@@ -1421,8 +1418,8 @@
return None
if self.len != self.setimplementation.length():
self.len = -1 # Make this error state sticky
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("set changed size during iteration"))
+ raise oefmt(self.space.w_RuntimeError,
+ "set changed size during iteration")
# look for the next entry
if self.pos < self.len:
result = self.next_entry()
@@ -1435,8 +1432,8 @@
# We try to explicitly look it up in the set.
if not self.setimplementation.has_key(result):
self.len = -1 # Make this error state sticky
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("dictionary changed during iteration"))
+ raise oefmt(self.space.w_RuntimeError,
+ "dictionary changed during iteration")
return result
# no more entries
self.setimplementation = None
diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py
--- a/pypy/objspace/std/sliceobject.py
+++ b/pypy/objspace/std/sliceobject.py
@@ -3,7 +3,7 @@
import sys
from pypy.interpreter import gateway
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import GetSetProperty, TypeDef
from rpython.rlib.objectmodel import specialize
from rpython.rlib import jit
@@ -29,8 +29,7 @@
else:
step = _eval_slice_index(space, w_slice.w_step)
if step == 0:
- raise OperationError(space.w_ValueError,
- space.wrap("slice step cannot be zero"))
+ raise oefmt(space.w_ValueError, "slice step cannot be zero")
if space.is_w(w_slice.w_start, space.w_None):
if step < 0:
start = length - 1
@@ -98,11 +97,9 @@
elif len(args_w) == 3:
w_start, w_stop, w_step = args_w
elif len(args_w) > 3:
- raise OperationError(space.w_TypeError,
- space.wrap("slice() takes at most 3 arguments"))
+ raise oefmt(space.w_TypeError, "slice() takes at most 3 arguments")
else:
- raise OperationError(space.w_TypeError,
- space.wrap("slice() takes at least 1 argument"))
+ raise oefmt(space.w_TypeError, "slice() takes at least 1 argument")
w_obj = space.allocate_instance(W_SliceObject, w_slicetype)
W_SliceObject.__init__(w_obj, w_start, w_stop, w_step)
return w_obj
@@ -166,8 +163,7 @@
def fget(space, w_obj):
from pypy.objspace.std.sliceobject import W_SliceObject
if not isinstance(w_obj, W_SliceObject):
- raise OperationError(space.w_TypeError,
- space.wrap("descriptor is for 'slice'"))
+ raise oefmt(space.w_TypeError, "descriptor is for 'slice'")
return getattr(w_obj, name)
return GetSetProperty(fget)
@@ -200,9 +196,9 @@
except OperationError as err:
if not err.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_TypeError,
- space.wrap("slice indices must be integers or "
- "None or have an __index__ method"))
+ raise oefmt(space.w_TypeError,
+ "slice indices must be integers or None or have an "
+ "__index__ method")
def adapt_lower_bound(space, size, w_index):
index = _eval_slice_index(space, w_index)
diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py
--- a/pypy/objspace/std/specialisedtupleobject.py
+++ b/pypy/objspace/std/specialisedtupleobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.objspace.std.tupleobject import W_AbstractTupleObject
from pypy.objspace.std.util import negate
from rpython.rlib.objectmodel import compute_hash, specialize
@@ -117,8 +117,7 @@
if typetuple[i] != object:
value = space.wrap(value)
return value
- raise OperationError(space.w_IndexError,
- space.wrap("tuple index out of range"))
+ raise oefmt(space.w_IndexError, "tuple index out of range")
cls.__name__ = ('W_SpecialisedTupleObject_' +
''.join([t.__name__[0] for t in typetuple]))
@@ -181,8 +180,7 @@
def specialized_zip_2_lists(space, w_list1, w_list2):
from pypy.objspace.std.listobject import W_ListObject
if type(w_list1) is not W_ListObject or type(w_list2) is not W_ListObject:
- raise OperationError(space.w_TypeError,
- space.wrap("expected two exact lists"))
+ raise oefmt(space.w_TypeError, "expected two exact lists")
if space.config.objspace.std.withspecialisedtuple:
intlist1 = w_list1.getitems_int()
diff --git a/pypy/objspace/std/transparent.py b/pypy/objspace/std/transparent.py
--- a/pypy/objspace/std/transparent.py
+++ b/pypy/objspace/std/transparent.py
@@ -49,7 +49,7 @@
Return something that looks like it is of type typ. Its behaviour is
completely controlled by the controller."""
if not space.is_true(space.callable(w_controller)):
- raise OperationError(space.w_TypeError, space.wrap("controller should be function"))
+ raise oefmt(space.w_TypeError, "controller should be function")
if isinstance(w_type, W_TypeObject):
if space.is_true(space.issubtype(w_type, space.gettypeobject(Function.typedef))):
@@ -65,7 +65,7 @@
if w_type.layout.typedef is space.w_object.layout.typedef:
return W_Transparent(space, w_type, w_controller)
else:
- raise OperationError(space.w_TypeError, space.wrap("type expected as first argument"))
+ raise oefmt(space.w_TypeError, "type expected as first argument")
w_lookup = w_type
for k, v in type_cache.cache:
if w_lookup == k:
diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py
--- a/pypy/objspace/std/tupleobject.py
+++ b/pypy/objspace/std/tupleobject.py
@@ -3,7 +3,7 @@
import sys
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import (
WrappedDefault, interp2app, interpindirect2app, unwrap_spec)
from pypy.interpreter.typedef import TypeDef
@@ -213,8 +213,7 @@
w_item = self.tolist()[i]
if space.eq_w(w_item, w_obj):
return space.wrap(i)
- raise OperationError(space.w_ValueError,
- space.wrap("tuple.index(x): x not in tuple"))
+ raise oefmt(space.w_ValueError, "tuple.index(x): x not in tuple")
W_AbstractTupleObject.typedef = TypeDef(
"tuple",
@@ -326,8 +325,7 @@
try:
return self.wrappeditems[index]
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("tuple index out of range"))
+ raise oefmt(space.w_IndexError, "tuple index out of range")
def wraptuple(space, list_w):
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -1,7 +1,7 @@
import weakref
from pypy.interpreter import gateway
from pypy.interpreter.baseobjspace import W_Root, SpaceCache
-from pypy.interpreter.error import oefmt, OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import Function, StaticMethod
from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\
descr_get_dict, dict_descr, Member, TypeDef
@@ -1240,8 +1240,8 @@
cycle.append(candidate)
cycle.reverse()
names = [cls.getname(space) for cls in cycle]
- raise OperationError(space.w_TypeError, space.wrap(
- "cycle among base classes: " + ' < '.join(names)))
+ raise oefmt(space.w_TypeError,
+ "cycle among base classes: %s", ' < '.join(names))
class TypeCache(SpaceCache):
diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
--- a/pypy/objspace/std/unicodeobject.py
+++ b/pypy/objspace/std/unicodeobject.py
@@ -73,8 +73,8 @@
return StringBuffer(builder.build())
def writebuf_w(self, space):
- raise OperationError(space.w_TypeError, space.wrap(
- "cannot use unicode as modifiable buffer"))
+ raise oefmt(space.w_TypeError,
+ "cannot use unicode as modifiable buffer")
charbuf_w = str_w
diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py
--- a/pypy/tool/pytest/appsupport.py
+++ b/pypy/tool/pytest/appsupport.py
@@ -2,7 +2,7 @@
import py
from pypy.interpreter import gateway, pycode
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
try:
from _pytest.assertion.newinterpret import interpret
@@ -232,9 +232,8 @@
args_w, kwds_w = __args__.unpack()
if space.isinstance_w(w_expr, space.w_str):
if args_w:
- raise OperationError(space.w_TypeError,
- space.wrap("raises() takes no argument "
- "after a string expression"))
+ raise oefmt(space.w_TypeError,
+ "raises() takes no argument after a string expression")
expr = space.unwrap(w_expr)
source = py.code.Source(expr)
frame = space.getexecutioncontext().gettopframe()
@@ -264,8 +263,7 @@
if e.match(space, w_ExpectedException):
return _exc_info(space, e)
raise
- raise OperationError(space.w_AssertionError,
- space.wrap("DID NOT RAISE"))
+ raise oefmt(space.w_AssertionError, "DID NOT RAISE")
app_raises = gateway.interp2app_temp(pypyraises)
From pypy.commits at gmail.com Mon May 2 20:23:41 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 17:23:41 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge oefmt (2faccce) oefmt
pypy/interpreter/
Message-ID: <5727ef8d.55301c0a.b3b3e.68db@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84153:682b98f3e672
Date: 2016-05-02 17:11 -0700
http://bitbucket.org/pypy/pypy/changeset/682b98f3e672/
Log: merge oefmt (2faccce) oefmt pypy/interpreter/
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -354,9 +354,7 @@
key = space.str_w(w_key)
except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("keywords must be strings"))
+ raise oefmt(space.w_TypeError, "keywords must be strings")
if e.match(space, space.w_UnicodeEncodeError):
# Allow this to pass through
key = None
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -16,8 +16,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -399,8 +399,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -67,8 +67,8 @@
return space.gettypeobject(self.typedef)
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("__class__ assignment: only for heap types"))
+ raise oefmt(space.w_TypeError,
+ "__class__ assignment: only for heap types")
def user_setup(self, space, w_subtype):
raise NotImplementedError("only for interp-level user subclasses "
@@ -706,8 +706,7 @@
try:
return rthread.allocate_lock()
except rthread.error:
- raise OperationError(self.w_RuntimeError,
- self.wrap("out of resources"))
+ raise oefmt(self.w_RuntimeError, "out of resources")
# Following is a friendly interface to common object space operations
# that can be defined in term of more primitive ones. Subclasses
@@ -901,8 +900,7 @@
raise
break # done
if idx == expected_length:
- raise OperationError(self.w_ValueError,
- self.wrap("too many values to unpack"))
+ raise oefmt(self.w_ValueError, "too many values to unpack")
items[idx] = w_item
idx += 1
if idx < expected_length:
@@ -962,8 +960,8 @@
hint = self.int_w(w_hint)
if hint < 0:
- raise OperationError(self.w_ValueError, self.wrap(
- "__length_hint__() should return >= 0"))
+ raise oefmt(self.w_ValueError,
+ "__length_hint__() should return >= 0")
return hint
def fixedview(self, w_iterable, expected_length=-1):
@@ -1330,8 +1328,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
return start, stop, step
@@ -1351,8 +1348,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
length = 1
@@ -1396,20 +1392,17 @@
try:
return bigint.tolonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
def r_ulonglong_w(self, w_obj, allow_conversion=True):
bigint = self.bigint_w(w_obj, allow_conversion)
try:
return bigint.toulonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
except ValueError:
- raise OperationError(self.w_ValueError,
- self.wrap('cannot convert negative integer '
- 'to unsigned int'))
+ raise oefmt(self.w_ValueError,
+ "cannot convert negative integer to unsigned int")
BUF_SIMPLE = 0x0000
BUF_WRITABLE = 0x0001
@@ -1555,8 +1548,8 @@
from rpython.rlib import rstring
result = w_obj.str_w(self)
if '\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a string without NUL characters")
return rstring.assert_str0(result)
def int_w(self, w_obj, allow_conversion=True):
@@ -1596,8 +1589,7 @@
def realstr_w(self, w_obj):
# Like str_w, but only works if w_obj is really of type 'str'.
if not self.isinstance_w(w_obj, self.w_str):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a string'))
+ raise oefmt(self.w_TypeError, "argument must be a string")
return self.str_w(w_obj)
def unicode_w(self, w_obj):
@@ -1608,16 +1600,16 @@
from rpython.rlib import rstring
result = w_obj.unicode_w(self)
if u'\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a unicode string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a unicode string without NUL "
+ "characters")
return rstring.assert_str0(result)
def realunicode_w(self, w_obj):
# Like unicode_w, but only works if w_obj is really of type
# 'unicode'.
if not self.isinstance_w(w_obj, self.w_unicode):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a unicode'))
+ raise oefmt(self.w_TypeError, "argument must be a unicode")
return self.unicode_w(w_obj)
def bool_w(self, w_obj):
@@ -1636,8 +1628,8 @@
def gateway_r_uint_w(self, w_obj):
if self.isinstance_w(w_obj, self.w_float):
- raise OperationError(self.w_TypeError,
- self.wrap("integer argument expected, got float"))
+ raise oefmt(self.w_TypeError,
+ "integer argument expected, got float")
return self.uint_w(self.int(w_obj))
def gateway_nonnegint_w(self, w_obj):
@@ -1645,8 +1637,7 @@
# the integer is negative. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
return value
def c_int_w(self, w_obj):
@@ -1654,8 +1645,7 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < INT_MIN or value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_uint_w(self, w_obj):
@@ -1663,8 +1653,8 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.uint_w(w_obj)
if value > UINT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected an unsigned 32-bit integer"))
+ raise oefmt(self.w_OverflowError,
+ "expected an unsigned 32-bit integer")
return value
def c_nonnegint_w(self, w_obj):
@@ -1673,11 +1663,9 @@
# for gateway.py.
value = self.int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
if value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_short_w(self, w_obj):
@@ -1733,17 +1721,15 @@
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
except OperationError as e:
if e.match(self, self.w_AttributeError):
- raise OperationError(self.w_TypeError,
- self.wrap("argument must be an int, or have a fileno() "
- "method.")
- )
+ raise oefmt(self.w_TypeError,
+ "argument must be an int, or have a fileno() "
+ "method.")
raise
w_fd = self.call_function(w_fileno)
if (not self.isinstance_w(w_fd, self.w_int) and
not self.isinstance_w(w_fd, self.w_long)):
- raise OperationError(self.w_TypeError,
- self.wrap("fileno() returned a non-integer")
- )
+ raise oefmt(self.w_TypeError,
+ "fileno() returned a non-integer")
try:
fd = self.c_int_w(w_fd)
except OperationError as e:
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -214,9 +214,8 @@
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("instance exception may not "
- "have a separate value"))
+ raise oefmt(space.w_TypeError,
+ "instance exception may not have a separate value")
w_value = w_inst
w_type = w_instclass
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -202,16 +202,15 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting function's dictionary to a non-dict")
- )
+ raise oefmt(space.w_TypeError,
+ "setting function's dictionary to a non-dict")
self.w_func_dict = w_dict
def descr_function__new__(space, w_subtype, w_code, w_globals,
w_name=None, w_argdefs=None, w_closure=None):
code = space.interp_w(Code, w_code)
if not space.isinstance_w(w_globals, space.w_dict):
- raise OperationError(space.w_TypeError, space.wrap("expected dict"))
+ raise oefmt(space.w_TypeError, "expected dict")
if not space.is_none(w_name):
name = space.str_w(w_name)
else:
@@ -227,15 +226,15 @@
if space.is_none(w_closure) and nfreevars == 0:
closure = None
elif not space.is_w(space.type(w_closure), space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("invalid closure"))
+ raise oefmt(space.w_TypeError, "invalid closure")
else:
from pypy.interpreter.nestedscope import Cell
closure_w = space.unpackiterable(w_closure)
n = len(closure_w)
if nfreevars == 0:
- raise OperationError(space.w_ValueError, space.wrap("no closure needed"))
+ raise oefmt(space.w_ValueError, "no closure needed")
elif nfreevars != n:
- raise OperationError(space.w_ValueError, space.wrap("closure is wrong size"))
+ raise oefmt(space.w_ValueError, "closure is wrong size")
closure = [space.interp_w(Cell, w_cell) for w_cell in closure_w]
func = space.allocate_instance(Function, w_subtype)
Function.__init__(func, space, code, w_globals, defs_w, closure, name)
@@ -321,8 +320,8 @@
w_func_dict, w_module) = args_w
except ValueError:
# wrong args
- raise OperationError(space.w_ValueError,
- space.wrap("Wrong arguments to function.__setstate__"))
+ raise oefmt(space.w_ValueError,
+ "Wrong arguments to function.__setstate__")
self.space = space
self.name = space.str_w(w_name)
@@ -359,7 +358,8 @@
self.defs_w = []
return
if not space.isinstance_w(w_defaults, space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("func_defaults must be set to a tuple object or None"))
+ raise oefmt(space.w_TypeError,
+ "func_defaults must be set to a tuple object or None")
self.defs_w = space.fixedview(w_defaults)
def fdel_func_defaults(self, space):
@@ -380,8 +380,8 @@
if space.isinstance_w(w_name, space.w_str):
self.name = space.str_w(w_name)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("__name__ must be set to a string object"))
+ raise oefmt(space.w_TypeError,
+ "__name__ must be set to a string object")
def fdel_func_doc(self, space):
self.w_doc = space.w_None
@@ -406,8 +406,8 @@
def fset_func_code(self, space, w_code):
from pypy.interpreter.pycode import PyCode
if not self.can_change_code:
- raise OperationError(space.w_TypeError,
- space.wrap("Cannot change code attribute of builtin functions"))
+ raise oefmt(space.w_TypeError,
+ "Cannot change code attribute of builtin functions")
code = space.interp_w(Code, w_code)
closure_len = 0
if self.closure:
@@ -457,8 +457,7 @@
if space.is_w(w_instance, space.w_None):
w_instance = None
if w_instance is None and space.is_none(w_class):
- raise OperationError(space.w_TypeError,
- space.wrap("unbound methods must have class"))
+ raise oefmt(space.w_TypeError, "unbound methods must have class")
method = space.allocate_instance(Method, w_subtype)
Method.__init__(method, space, w_function, w_instance, w_class)
return space.wrap(method)
@@ -659,8 +658,8 @@
self.w_module = func.w_module
def descr_builtinfunction__new__(space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("cannot create 'builtin_function' instances"))
+ raise oefmt(space.w_TypeError,
+ "cannot create 'builtin_function' instances")
def descr_function_repr(self):
return self.space.wrap('' % (self.name,))
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -21,7 +21,7 @@
from pypy.interpreter.signature import Signature
from pypy.interpreter.baseobjspace import (W_Root, ObjSpace, SpaceCache,
DescrMismatch)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import ClassMethod, FunctionWithFixedCode
from rpython.rlib import rstackovf
from rpython.rlib.objectmodel import we_are_translated
@@ -699,14 +699,13 @@
raise
raise e
except KeyboardInterrupt:
- raise OperationError(space.w_KeyboardInterrupt,
- space.w_None)
+ raise OperationError(space.w_KeyboardInterrupt, space.w_None)
except MemoryError:
raise OperationError(space.w_MemoryError, space.w_None)
except rstackovf.StackOverflow as e:
rstackovf.check_stack_overflow()
- raise OperationError(space.w_RuntimeError,
- space.wrap("maximum recursion depth exceeded"))
+ raise oefmt(space.w_RuntimeError,
+ "maximum recursion depth exceeded")
except RuntimeError: # not on top of py.py
raise OperationError(space.w_RuntimeError, space.w_None)
@@ -762,8 +761,7 @@
try:
w_result = self.fastfunc_0(space)
except DescrMismatch:
- raise OperationError(space.w_SystemError,
- space.wrap("unexpected DescrMismatch error"))
+ raise oefmt(space.w_SystemError, "unexpected DescrMismatch error")
except Exception as e:
self.handle_exception(space, e)
w_result = None
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.pyopcode import LoopBlock
from rpython.rlib import jit
@@ -76,8 +76,7 @@
def _send_ex(self, w_arg, operr):
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None:
# xxx a bit ad-hoc, but we don't want to go inside
@@ -89,8 +88,9 @@
last_instr = jit.promote(frame.last_instr)
if last_instr == -1:
if w_arg and not space.is_w(w_arg, space.w_None):
- msg = "can't send non-None value to a just-started generator"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "can't send non-None value to a just-started "
+ "generator")
else:
if not w_arg:
w_arg = space.w_None
@@ -151,8 +151,8 @@
raise
if w_retval is not None:
- msg = "generator ignored GeneratorExit"
- raise OperationError(space.w_RuntimeError, space.wrap(msg))
+ raise oefmt(space.w_RuntimeError,
+ "generator ignored GeneratorExit")
def descr_gi_frame(self, space):
if self.frame is not None and not self.frame.frame_finished_execution:
@@ -184,8 +184,7 @@
# XXX copied and simplified version of send_ex()
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None: # already finished
return
diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py
--- a/pypy/interpreter/nestedscope.py
+++ b/pypy/interpreter/nestedscope.py
@@ -1,7 +1,7 @@
from rpython.tool.uid import uid
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.mixedmodule import MixedModule
@@ -78,4 +78,4 @@
try:
return self.get()
except ValueError:
- raise OperationError(space.w_ValueError, space.wrap("Cell is empty"))
+ raise oefmt(space.w_ValueError, "Cell is empty")
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -8,7 +8,7 @@
from pypy.interpreter import eval
from pypy.interpreter.signature import Signature
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.astcompiler.consts import (
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED,
@@ -374,14 +374,13 @@
lnotab, w_freevars=None, w_cellvars=None,
magic=default_magic):
if argcount < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: argcount must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: argcount must not be negative")
if nlocals < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: nlocals must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: nlocals must not be negative")
if not space.isinstance_w(w_constants, space.w_tuple):
- raise OperationError(space.w_TypeError,
- space.wrap("Expected tuple for constants"))
+ raise oefmt(space.w_TypeError, "Expected tuple for constants")
consts_w = space.fixedview(w_constants)
names = unpack_str_tuple(space, w_names)
varnames = unpack_str_tuple(space, w_varnames)
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -7,7 +7,7 @@
from pypy.interpreter.pyparser import future, pyparse, error as parseerror
from pypy.interpreter.astcompiler import (astbuilder, codegen, consts, misc,
optimize, ast)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
class AbstractCompiler(object):
@@ -116,8 +116,7 @@
else:
check = True
if not check:
- raise OperationError(self.space.w_TypeError, self.space.wrap(
- "invalid node type"))
+ raise oefmt(self.space.w_TypeError, "invalid node type")
fut = misc.parse_future(node, self.future_flags.compiler_features)
f_flags, f_lineno, f_col = fut
@@ -132,8 +131,7 @@
mod = optimize.optimize_ast(space, node, info)
code = codegen.compile_ast(space, mod, info)
except parseerror.SyntaxError as e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return code
def compile_to_ast(self, source, filename, mode, flags):
@@ -146,11 +144,9 @@
parse_tree = self.parser.parse_source(source, info)
mod = astbuilder.ast_from_node(space, parse_tree, info)
except parseerror.IndentationError as e:
- raise OperationError(space.w_IndentationError,
- e.wrap_info(space))
+ raise OperationError(space.w_IndentationError, e.wrap_info(space))
except parseerror.SyntaxError as e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return mod
def compile(self, source, filename, mode, flags, hidden_applevel=False):
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -220,9 +220,9 @@
return # no cells needed - fast path
elif outer_func is None:
space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("directly executed code object "
- "may not contain free variables"))
+ raise oefmt(space.w_TypeError,
+ "directly executed code object may not contain free "
+ "variables")
if outer_func and outer_func.closure:
closure_size = len(outer_func.closure)
else:
@@ -513,7 +513,7 @@
self.locals_cells_stack_w = values_w[:]
valuestackdepth = space.int_w(w_stackdepth)
if not self._check_stack_index(valuestackdepth):
- raise OperationError(space.w_ValueError, space.wrap("invalid stackdepth"))
+ raise oefmt(space.w_ValueError, "invalid stackdepth")
assert valuestackdepth >= 0
self.valuestackdepth = valuestackdepth
if space.is_w(w_exc_value, space.w_None):
@@ -686,12 +686,11 @@
try:
new_lineno = space.int_w(w_new_lineno)
except OperationError:
- raise OperationError(space.w_ValueError,
- space.wrap("lineno must be an integer"))
+ raise oefmt(space.w_ValueError, "lineno must be an integer")
if self.get_w_f_trace() is None:
- raise OperationError(space.w_ValueError,
- space.wrap("f_lineno can only be set by a trace function."))
+ raise oefmt(space.w_ValueError,
+ "f_lineno can only be set by a trace function.")
line = self.pycode.co_firstlineno
if new_lineno < line:
@@ -718,8 +717,8 @@
# Don't jump to a line with an except in it.
code = self.pycode.co_code
if ord(code[new_lasti]) in (DUP_TOP, POP_TOP):
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump to 'except' line as there's no exception"))
+ raise oefmt(space.w_ValueError,
+ "can't jump to 'except' line as there's no exception")
# Don't jump into or out of a finally block.
f_lasti_setup_addr = -1
@@ -800,8 +799,8 @@
new_iblock = f_iblock - delta_iblock
if new_iblock > min_iblock:
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump into the middle of a block"))
+ raise oefmt(space.w_ValueError,
+ "can't jump into the middle of a block")
while f_iblock > new_iblock:
block = self.pop_block()
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -348,7 +348,7 @@
excinfo = py.test.raises(OperationError, Arguments, space, [],
["a"], [1], w_starstararg={None: 1})
assert excinfo.value.w_type is TypeError
- assert excinfo.value._w_value is not None
+ assert excinfo.value._w_value is None
excinfo = py.test.raises(OperationError, Arguments, space, [],
["a"], [1], w_starstararg={valuedummy: 1})
assert excinfo.value.w_type is ValueError
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -253,8 +253,7 @@
def unknown_objclass_getter(space):
# NB. this is an AttributeError to make inspect.py happy
- raise OperationError(space.w_AttributeError,
- space.wrap("generic property has no __objclass__"))
+ raise oefmt(space.w_AttributeError, "generic property has no __objclass__")
@specialize.arg(0)
def make_objclass_getter(tag, func, cls):
@@ -328,8 +327,7 @@
Change the value of the property of the given obj."""
fset = self.fset
if fset is None:
- raise OperationError(space.w_TypeError,
- space.wrap("readonly attribute"))
+ raise oefmt(space.w_TypeError, "readonly attribute")
try:
fset(self, space, w_obj, w_value)
except DescrMismatch:
@@ -344,8 +342,7 @@
Delete the value of the property from the given obj."""
fdel = self.fdel
if fdel is None:
- raise OperationError(space.w_AttributeError,
- space.wrap("cannot delete attribute"))
+ raise oefmt(space.w_AttributeError, "cannot delete attribute")
try:
fdel(self, space, w_obj)
except DescrMismatch:
From pypy.commits at gmail.com Mon May 2 20:23:43 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 17:23:43 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: merge default (oefmt pypy/interpreter/)
Message-ID: <5727ef8f.a553c20a.2fb9d.3281@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84154:a4878080a536
Date: 2016-05-02 17:18 -0700
http://bitbucket.org/pypy/pypy/changeset/a4878080a536/
Log: merge default (oefmt pypy/interpreter/)
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.1.1.rst
release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -387,9 +387,7 @@
key = space.identifier_w(w_key)
except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("keywords must be strings"))
+ raise oefmt(space.w_TypeError, "keywords must be strings")
if e.match(space, space.w_UnicodeEncodeError):
# Allow this to pass through
key = None
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -15,8 +15,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -420,8 +420,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -91,8 +91,8 @@
return space.gettypeobject(self.typedef)
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("__class__ assignment: only for heap types"))
+ raise oefmt(space.w_TypeError,
+ "__class__ assignment: only for heap types")
def user_setup(self, space, w_subtype):
raise NotImplementedError("only for interp-level user subclasses "
@@ -725,8 +725,7 @@
try:
return rthread.allocate_lock()
except rthread.error:
- raise OperationError(self.w_RuntimeError,
- self.wrap("out of resources"))
+ raise oefmt(self.w_RuntimeError, "out of resources")
# Following is a friendly interface to common object space operations
# that can be defined in term of more primitive ones. Subclasses
@@ -986,8 +985,8 @@
hint = self.int_w(w_hint)
if hint < 0:
- raise OperationError(self.w_ValueError, self.wrap(
- "__length_hint__() should return >= 0"))
+ raise oefmt(self.w_ValueError,
+ "__length_hint__() should return >= 0")
return hint
def fixedview(self, w_iterable, expected_length=-1):
@@ -1328,8 +1327,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
return start, stop, step
@@ -1349,8 +1347,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
length = 1
@@ -1406,20 +1403,17 @@
try:
return bigint.tolonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
def r_ulonglong_w(self, w_obj, allow_conversion=True):
bigint = self.bigint_w(w_obj, allow_conversion)
try:
return bigint.toulonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
except ValueError:
- raise OperationError(self.w_ValueError,
- self.wrap('cannot convert negative integer '
- 'to unsigned int'))
+ raise oefmt(self.w_ValueError,
+ "cannot convert negative integer to unsigned int")
BUF_SIMPLE = 0x0000
BUF_WRITABLE = 0x0001
@@ -1578,8 +1572,8 @@
from rpython.rlib import rstring
result = self.bytes_w(w_obj)
if '\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a string without NUL characters")
return rstring.assert_str0(result)
def int_w(self, w_obj, allow_conversion=True):
@@ -1624,16 +1618,16 @@
from rpython.rlib import rstring
result = w_obj.unicode_w(self)
if u'\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a unicode string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a unicode string without NUL "
+ "characters")
return rstring.assert_str0(result)
def realunicode_w(self, w_obj):
# Like unicode_w, but only works if w_obj is really of type
# 'unicode'.
if not self.isinstance_w(w_obj, self.w_unicode):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a unicode'))
+ raise oefmt(self.w_TypeError, "argument must be a unicode")
return self.unicode_w(w_obj)
def identifier_w(self, w_obj):
@@ -1683,8 +1677,8 @@
def gateway_r_uint_w(self, w_obj):
if self.isinstance_w(w_obj, self.w_float):
- raise OperationError(self.w_TypeError,
- self.wrap("integer argument expected, got float"))
+ raise oefmt(self.w_TypeError,
+ "integer argument expected, got float")
return self.uint_w(self.int(w_obj))
def gateway_nonnegint_w(self, w_obj):
@@ -1692,8 +1686,7 @@
# the integer is negative. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
return value
def c_int_w(self, w_obj):
@@ -1701,8 +1694,7 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < INT_MIN or value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_uint_w(self, w_obj):
@@ -1710,8 +1702,8 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.uint_w(w_obj)
if value > UINT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected an unsigned 32-bit integer"))
+ raise oefmt(self.w_OverflowError,
+ "expected an unsigned 32-bit integer")
return value
def c_nonnegint_w(self, w_obj):
@@ -1720,11 +1712,9 @@
# for gateway.py.
value = self.int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
if value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_short_w(self, w_obj):
@@ -1779,16 +1769,14 @@
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
except OperationError as e:
if e.match(self, self.w_AttributeError):
- raise OperationError(self.w_TypeError,
- self.wrap("argument must be an int, or have a fileno() "
- "method.")
- )
+ raise oefmt(self.w_TypeError,
+ "argument must be an int, or have a fileno() "
+ "method.")
raise
w_fd = self.call_function(w_fileno)
if not self.isinstance_w(w_fd, self.w_int):
- raise OperationError(self.w_TypeError,
- self.wrap("fileno() returned a non-integer")
- )
+ raise oefmt(self.w_TypeError,
+ "fileno() returned a non-integer")
fd = self.c_int_w(w_fd) # Can raise w_OverflowError
if fd < 0:
raise oefmt(self.w_ValueError,
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -236,9 +236,8 @@
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("instance exception may not "
- "have a separate value"))
+ raise oefmt(space.w_TypeError,
+ "instance exception may not have a separate value")
w_value = w_inst
w_type = w_instclass
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -206,16 +206,15 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting function's dictionary to a non-dict")
- )
+ raise oefmt(space.w_TypeError,
+ "setting function's dictionary to a non-dict")
self.w_func_dict = w_dict
def descr_function__new__(space, w_subtype, w_code, w_globals,
w_name=None, w_argdefs=None, w_closure=None):
code = space.interp_w(Code, w_code)
if not space.isinstance_w(w_globals, space.w_dict):
- raise OperationError(space.w_TypeError, space.wrap("expected dict"))
+ raise oefmt(space.w_TypeError, "expected dict")
if not space.is_none(w_name):
name = space.str_w(w_name)
else:
@@ -231,15 +230,15 @@
if space.is_none(w_closure) and nfreevars == 0:
closure = None
elif not space.is_w(space.type(w_closure), space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("invalid closure"))
+ raise oefmt(space.w_TypeError, "invalid closure")
else:
from pypy.interpreter.nestedscope import Cell
closure_w = space.unpackiterable(w_closure)
n = len(closure_w)
if nfreevars == 0:
- raise OperationError(space.w_ValueError, space.wrap("no closure needed"))
+ raise oefmt(space.w_ValueError, "no closure needed")
elif nfreevars != n:
- raise OperationError(space.w_ValueError, space.wrap("closure is wrong size"))
+ raise oefmt(space.w_ValueError, "closure is wrong size")
closure = [space.interp_w(Cell, w_cell) for w_cell in closure_w]
func = space.allocate_instance(Function, w_subtype)
Function.__init__(func, space, code, w_globals, defs_w, None, closure,
@@ -327,8 +326,8 @@
w_defs, w_func_dict, w_module) = args_w
except ValueError:
# wrong args
- raise OperationError(space.w_ValueError,
- space.wrap("Wrong arguments to function.__setstate__"))
+ raise oefmt(space.w_ValueError,
+ "Wrong arguments to function.__setstate__")
self.space = space
self.name = space.str_w(w_name)
@@ -366,7 +365,8 @@
self.defs_w = []
return
if not space.isinstance_w(w_defaults, space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("func_defaults must be set to a tuple object or None"))
+ raise oefmt(space.w_TypeError,
+ "func_defaults must be set to a tuple object or None")
self.defs_w = space.fixedview(w_defaults)
def fdel_func_defaults(self, space):
@@ -403,8 +403,8 @@
if space.isinstance_w(w_name, space.w_unicode):
self.name = space.str_w(w_name)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("__name__ must be set to a string object"))
+ raise oefmt(space.w_TypeError,
+ "__name__ must be set to a string object")
def fget_func_qualname(self, space):
return space.wrap(self.qualname)
@@ -442,8 +442,8 @@
def fset_func_code(self, space, w_code):
from pypy.interpreter.pycode import PyCode
if not self.can_change_code:
- raise OperationError(space.w_TypeError,
- space.wrap("Cannot change code attribute of builtin functions"))
+ raise oefmt(space.w_TypeError,
+ "Cannot change code attribute of builtin functions")
code = space.interp_w(Code, w_code)
closure_len = 0
if self.closure:
@@ -502,8 +502,7 @@
if space.is_w(w_instance, space.w_None):
w_instance = None
if w_instance is None:
- raise OperationError(space.w_TypeError,
- space.wrap("self must not be None"))
+ raise oefmt(space.w_TypeError, "self must not be None")
method = space.allocate_instance(Method, w_subtype)
Method.__init__(method, space, w_function, w_instance)
return space.wrap(method)
@@ -647,8 +646,8 @@
self.w_module = func.w_module
def descr_builtinfunction__new__(space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("cannot create 'builtin_function' instances"))
+ raise oefmt(space.w_TypeError,
+ "cannot create 'builtin_function' instances")
def descr_function_repr(self):
return self.space.wrap('' % (self.name,))
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -21,7 +21,7 @@
from pypy.interpreter.signature import Signature
from pypy.interpreter.baseobjspace import (W_Root, ObjSpace, SpaceCache,
DescrMismatch)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import ClassMethod, FunctionWithFixedCode
from rpython.rlib import rstackovf
from rpython.rlib.objectmodel import we_are_translated
@@ -715,14 +715,13 @@
raise
raise e
except KeyboardInterrupt:
- raise OperationError(space.w_KeyboardInterrupt,
- space.w_None)
+ raise OperationError(space.w_KeyboardInterrupt, space.w_None)
except MemoryError:
raise OperationError(space.w_MemoryError, space.w_None)
except rstackovf.StackOverflow as e:
rstackovf.check_stack_overflow()
- raise OperationError(space.w_RuntimeError,
- space.wrap("maximum recursion depth exceeded"))
+ raise oefmt(space.w_RuntimeError,
+ "maximum recursion depth exceeded")
except RuntimeError: # not on top of py.py
raise OperationError(space.w_RuntimeError, space.w_None)
@@ -778,8 +777,7 @@
try:
w_result = self.fastfunc_0(space)
except DescrMismatch:
- raise OperationError(space.w_SystemError,
- space.wrap("unexpected DescrMismatch error"))
+ raise oefmt(space.w_SystemError, "unexpected DescrMismatch error")
except Exception as e:
self.handle_exception(space, e)
w_result = None
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.pyopcode import LoopBlock
from rpython.rlib import jit
@@ -76,8 +76,7 @@
def _send_ex(self, w_arg, operr):
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None:
# xxx a bit ad-hoc, but we don't want to go inside
@@ -89,8 +88,9 @@
last_instr = jit.promote(frame.last_instr)
if last_instr == -1:
if w_arg and not space.is_w(w_arg, space.w_None):
- msg = "can't send non-None value to a just-started generator"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "can't send non-None value to a just-started "
+ "generator")
else:
if not w_arg:
w_arg = space.w_None
@@ -226,8 +226,8 @@
raise
if w_retval is not None:
- msg = "generator ignored GeneratorExit"
- raise OperationError(space.w_RuntimeError, space.wrap(msg))
+ raise oefmt(space.w_RuntimeError,
+ "generator ignored GeneratorExit")
def descr_gi_frame(self, space):
if self.frame is not None and not self.frame.frame_finished_execution:
@@ -259,8 +259,7 @@
# XXX copied and simplified version of send_ex()
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None: # already finished
return
diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py
--- a/pypy/interpreter/nestedscope.py
+++ b/pypy/interpreter/nestedscope.py
@@ -1,7 +1,7 @@
from rpython.tool.uid import uid
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.mixedmodule import MixedModule
@@ -84,4 +84,4 @@
try:
return self.get()
except ValueError:
- raise OperationError(space.w_ValueError, space.wrap("Cell is empty"))
+ raise oefmt(space.w_ValueError, "Cell is empty")
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -8,7 +8,7 @@
from pypy.interpreter import eval
from pypy.interpreter.signature import Signature
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.astcompiler.consts import (
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED,
@@ -381,17 +381,16 @@
lnotab, w_freevars=None, w_cellvars=None,
magic=default_magic):
if argcount < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: argcount must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: argcount must not be negative")
if kwonlyargcount < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: kwonlyargcount must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: kwonlyargcount must not be negative")
if nlocals < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: nlocals must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: nlocals must not be negative")
if not space.isinstance_w(w_constants, space.w_tuple):
- raise OperationError(space.w_TypeError,
- space.wrap("Expected tuple for constants"))
+ raise oefmt(space.w_TypeError, "Expected tuple for constants")
consts_w = space.fixedview(w_constants)
names = unpack_str_tuple(space, w_names)
varnames = unpack_str_tuple(space, w_varnames)
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -7,7 +7,7 @@
from pypy.interpreter.pyparser import future, pyparse, error as parseerror
from pypy.interpreter.astcompiler import (astbuilder, codegen, consts, misc,
optimize, ast, validate)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
class AbstractCompiler(object):
@@ -116,8 +116,7 @@
else:
check = True
if not check:
- raise OperationError(self.space.w_TypeError, self.space.wrap(
- "invalid node type"))
+ raise oefmt(self.space.w_TypeError, "invalid node type")
fut = misc.parse_future(node, self.future_flags.compiler_features)
f_flags, f_lineno, f_col = fut
@@ -133,8 +132,7 @@
mod = optimize.optimize_ast(space, node, info)
code = codegen.compile_ast(space, mod, info)
except parseerror.SyntaxError as e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return code
def validate_ast(self, node):
@@ -157,11 +155,9 @@
raise OperationError(space.w_TabError,
e.wrap_info(space))
except parseerror.IndentationError as e:
- raise OperationError(space.w_IndentationError,
- e.wrap_info(space))
+ raise OperationError(space.w_IndentationError, e.wrap_info(space))
except parseerror.SyntaxError as e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return mod
def compile(self, source, filename, mode, flags, hidden_applevel=False,
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -220,9 +220,9 @@
return # no cells needed - fast path
elif outer_func is None:
space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("directly executed code object "
- "may not contain free variables"))
+ raise oefmt(space.w_TypeError,
+ "directly executed code object may not contain free "
+ "variables")
if outer_func and outer_func.closure:
closure_size = len(outer_func.closure)
else:
@@ -513,7 +513,7 @@
self.locals_cells_stack_w = values_w[:]
valuestackdepth = space.int_w(w_stackdepth)
if not self._check_stack_index(valuestackdepth):
- raise OperationError(space.w_ValueError, space.wrap("invalid stackdepth"))
+ raise oefmt(space.w_ValueError, "invalid stackdepth")
assert valuestackdepth >= 0
self.valuestackdepth = valuestackdepth
if space.is_w(w_exc_value, space.w_None):
@@ -686,12 +686,11 @@
try:
new_lineno = space.int_w(w_new_lineno)
except OperationError:
- raise OperationError(space.w_ValueError,
- space.wrap("lineno must be an integer"))
+ raise oefmt(space.w_ValueError, "lineno must be an integer")
if self.get_w_f_trace() is None:
- raise OperationError(space.w_ValueError,
- space.wrap("f_lineno can only be set by a trace function."))
+ raise oefmt(space.w_ValueError,
+ "f_lineno can only be set by a trace function.")
line = self.pycode.co_firstlineno
if new_lineno < line:
@@ -718,8 +717,8 @@
# Don't jump to a line with an except in it.
code = self.pycode.co_code
if ord(code[new_lasti]) in (DUP_TOP, POP_TOP):
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump to 'except' line as there's no exception"))
+ raise oefmt(space.w_ValueError,
+ "can't jump to 'except' line as there's no exception")
# Don't jump into or out of a finally block.
f_lasti_setup_addr = -1
@@ -800,8 +799,8 @@
new_iblock = f_iblock - delta_iblock
if new_iblock > min_iblock:
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump into the middle of a block"))
+ raise oefmt(space.w_ValueError,
+ "can't jump into the middle of a block")
while f_iblock > new_iblock:
block = self.pop_block()
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -355,7 +355,7 @@
excinfo = py.test.raises(OperationError, Arguments, space, [],
["a"], [1], w_starstararg={None: 1})
assert excinfo.value.w_type is TypeError
- assert excinfo.value._w_value is not None
+ assert excinfo.value._w_value is None
excinfo = py.test.raises(OperationError, Arguments, space, [],
["a"], [1], w_starstararg={valuedummy: 1})
assert excinfo.value.w_type is ValueError
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -252,8 +252,7 @@
def unknown_objclass_getter(space):
# NB. this is an AttributeError to make inspect.py happy
- raise OperationError(space.w_AttributeError,
- space.wrap("generic property has no __objclass__"))
+ raise oefmt(space.w_AttributeError, "generic property has no __objclass__")
@specialize.arg(0)
def make_objclass_getter(tag, func, cls):
@@ -327,8 +326,7 @@
Change the value of the property of the given obj."""
fset = self.fset
if fset is None:
- raise OperationError(space.w_AttributeError,
- space.wrap("readonly attribute"))
+ raise oefmt(space.w_AttributeError, "readonly attribute")
try:
fset(self, space, w_obj, w_value)
except DescrMismatch:
@@ -343,8 +341,7 @@
Delete the value of the property from the given obj."""
fdel = self.fdel
if fdel is None:
- raise OperationError(space.w_AttributeError,
- space.wrap("cannot delete attribute"))
+ raise oefmt(space.w_AttributeError, "cannot delete attribute")
try:
fdel(self, space, w_obj)
except DescrMismatch:
From pypy.commits at gmail.com Mon May 2 20:31:18 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 17:31:18 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: merge default (oefmt pypy/{objspace,
tool}/)
Message-ID: <5727f156.ce9d1c0a.cf851.ffffdbaf@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84156:35dcdbf2fb5d
Date: 2016-05-02 17:27 -0700
http://bitbucket.org/pypy/pypy/changeset/35dcdbf2fb5d/
Log: merge default (oefmt pypy/{objspace,tool}/)
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -294,8 +294,7 @@
w_iter = space.get_and_call_function(w_descr, w_obj)
w_next = space.lookup(w_iter, '__next__')
if w_next is None:
- raise OperationError(space.w_TypeError,
- space.wrap("iter() returned non-iterator"))
+ raise oefmt(space.w_TypeError, "iter() returned non-iterator")
return w_iter
def next(space, w_obj):
@@ -370,8 +369,7 @@
if _check_notimplemented(space, w_res):
return w_res
- raise OperationError(space.w_TypeError,
- space.wrap("operands do not support **"))
+ raise oefmt(space.w_TypeError, "operands do not support **")
def inplace_pow(space, w_lhs, w_rhs):
w_impl = space.lookup(w_lhs, '__ipow__')
@@ -475,8 +473,7 @@
def issubtype_allow_override(space, w_sub, w_type):
w_check = space.lookup(w_type, "__subclasscheck__")
if w_check is None:
- raise OperationError(space.w_TypeError,
- space.wrap("issubclass not supported here"))
+ raise oefmt(space.w_TypeError, "issubclass not supported here")
return space.get_and_call_function(w_check, w_type, w_sub)
def isinstance_allow_override(space, w_inst, w_type):
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -63,7 +63,8 @@
if space.is_w(space.type(w_key), space.w_unicode):
self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
else:
- raise OperationError(space.w_TypeError, space.wrap("cannot add non-string keys to dict of a type"))
+ raise oefmt(space.w_TypeError,
+ "cannot add non-string keys to dict of a type")
def setitem_str(self, w_dict, key, w_value):
w_type = self.unerase(w_dict.dstorage)
diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py
--- a/pypy/objspace/std/formatting.py
+++ b/pypy/objspace/std/formatting.py
@@ -28,27 +28,24 @@
try:
w_result = self.values_w[self.values_pos]
except IndexError:
- space = self.space
- raise OperationError(space.w_TypeError, space.wrap(
- 'not enough arguments for format string'))
+ raise oefmt(self.space.w_TypeError,
+ "not enough arguments for format string")
else:
self.values_pos += 1
return w_result
def checkconsumed(self):
if self.values_pos < len(self.values_w) and self.w_valuedict is None:
- space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap('not all arguments converted '
- 'during string formatting'))
+ raise oefmt(self.space.w_TypeError,
+ "not all arguments converted during string formatting")
def std_wp_int(self, r, prefix=''):
# use self.prec to add some '0' on the left of the number
if self.prec >= 0:
if self.prec > 1000:
- raise OperationError(
- self.space.w_OverflowError, self.space.wrap(
- 'formatted integer is too long (precision too large?)'))
+ raise oefmt(self.space.w_OverflowError,
+ "formatted integer is too long (precision too "
+ "large?)")
sign = r[0] == '-'
padding = self.prec - (len(r)-int(sign))
if padding > 0:
@@ -164,9 +161,7 @@
try:
return self.fmt[self.fmtpos]
except IndexError:
- space = self.space
- raise OperationError(space.w_ValueError,
- space.wrap("incomplete format"))
+ raise oefmt(self.space.w_ValueError, "incomplete format")
# Only shows up if we've already started inlining format(), so just
# unconditionally unroll this.
@@ -182,8 +177,7 @@
c = fmt[i]
except IndexError:
space = self.space
- raise OperationError(space.w_ValueError,
- space.wrap("incomplete format key"))
+ raise oefmt(space.w_ValueError, "incomplete format key")
if c == ')':
pcount -= 1
if pcount == 0:
@@ -198,8 +192,7 @@
# return the value corresponding to a key in the input dict
space = self.space
if self.w_valuedict is None:
- raise OperationError(space.w_TypeError,
- space.wrap("format requires a mapping"))
+ raise oefmt(space.w_TypeError, "format requires a mapping")
w_key = space.wrap(key)
return space.getitem(self.w_valuedict, w_key)
@@ -341,9 +334,9 @@
s = space.str_w(w_s)
else:
s = c
- msg = "unsupported format character '%s' (0x%x) at index %d" % (
- s, ord(c), self.fmtpos - 1)
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "unsupported format character '%s' (%s) at index %d",
+ s, hex(ord(c)), self.fmtpos - 1)
def std_wp(self, r):
length = len(r)
@@ -428,9 +421,8 @@
space = self.space
w_impl = space.lookup(w_value, '__str__')
if w_impl is None:
- raise OperationError(space.w_TypeError,
- space.wrap("operand does not support "
- "unary str"))
+ raise oefmt(space.w_TypeError,
+ "operand does not support unary str")
w_result = space.get_and_call_function(w_impl, w_value)
if space.isinstance_w(w_result,
space.w_unicode):
@@ -468,16 +460,14 @@
if space.isinstance_w(w_value, space.w_str):
s = space.str_w(w_value)
if len(s) != 1:
- raise OperationError(space.w_TypeError,
- space.wrap("%c requires int or char"))
+ raise oefmt(space.w_TypeError, "%c requires int or char")
self.std_wp(s)
elif space.isinstance_w(w_value, space.w_unicode):
if not do_unicode:
raise NeedUnicodeFormattingError
ustr = space.unicode_w(w_value)
if len(ustr) != 1:
- raise OperationError(space.w_TypeError,
- space.wrap("%c requires int or unichar"))
+ raise oefmt(space.w_TypeError, "%c requires int or unichar")
self.std_wp(ustr)
else:
n = space.int_w(w_value)
@@ -485,15 +475,15 @@
try:
c = unichr(n)
except ValueError:
- raise OperationError(space.w_OverflowError,
- space.wrap("unicode character code out of range"))
+ raise oefmt(space.w_OverflowError,
+ "unicode character code out of range")
self.std_wp(c)
else:
try:
s = chr(n)
- except ValueError: # chr(out-of-range)
- raise OperationError(space.w_OverflowError,
- space.wrap("character code not in range(256)"))
+ except ValueError:
+ raise oefmt(space.w_OverflowError,
+ "character code not in range(256)")
self.std_wp(s)
return StringFormatter
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -565,8 +565,7 @@
index = space.getindex_w(w_index, space.w_IndexError, "list index")
return self.getitem(index)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
def descr_setitem(self, space, w_index, w_any):
if isinstance(w_index, W_SliceObject):
@@ -584,8 +583,7 @@
try:
self.setitem(idx, w_any)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
def descr_delitem(self, space, w_idx):
if isinstance(w_idx, W_SliceObject):
@@ -600,8 +598,7 @@
try:
self.pop(idx)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
def descr_reversed(self, space):
'L.__reversed__() -- return a reverse iterator over the list'
@@ -636,8 +633,7 @@
index (default last)'''
length = self.length()
if length == 0:
- raise OperationError(space.w_IndexError,
- space.wrap("pop from empty list"))
+ raise oefmt(space.w_IndexError, "pop from empty list")
# clearly differentiate between list.pop() and list.pop(index)
if index == -1:
return self.pop_end() # cannot raise because list is not empty
@@ -646,8 +642,7 @@
try:
return self.pop(index)
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("pop index out of range"))
+ raise oefmt(space.w_IndexError, "pop index out of range")
def descr_clear(self, space):
'''L.clear() -- remove all items'''
@@ -748,8 +743,7 @@
self.__init__(space, sorter.list)
if mucked:
- raise OperationError(space.w_ValueError,
- space.wrap("list modified during sort"))
+ raise oefmt(space.w_ValueError, "list modified during sort")
find_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'list.find')
@@ -1468,14 +1462,15 @@
def setslice(self, w_list, start, step, slicelength, w_other):
assert slicelength >= 0
+ space = self.space
- if self is self.space.fromcache(ObjectListStrategy):
+ if self is space.fromcache(ObjectListStrategy):
w_other = w_other._temporarily_as_objects()
elif not self.list_is_correct_type(w_other) and w_other.length() != 0:
w_list.switch_to_object_strategy()
w_other_as_object = w_other._temporarily_as_objects()
assert (w_other_as_object.strategy is
- self.space.fromcache(ObjectListStrategy))
+ space.fromcache(ObjectListStrategy))
w_list.setslice(start, step, slicelength, w_other_as_object)
return
@@ -1501,7 +1496,7 @@
assert start >= 0
del items[start:start + delta]
elif len2 != slicelength: # No resize for extended slices
- raise oefmt(self.space.w_ValueError,
+ raise oefmt(space.w_ValueError,
"attempt to assign sequence of size %d to extended "
"slice of size %d", len2, slicelength)
@@ -2099,8 +2094,8 @@
result = space.int_w(w_result)
except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(space.w_TypeError,
- space.wrap("comparison function must return int"))
+ raise oefmt(space.w_TypeError,
+ "comparison function must return int")
raise
return result < 0
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -561,12 +561,11 @@
@objectmodel.dont_inline
def _obj_setdict(self, space, w_dict):
- from pypy.interpreter.error import OperationError
+ from pypy.interpreter.error import oefmt
terminator = self._get_mapdict_map().terminator
assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator)
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting dictionary to a non-dict"))
+ raise oefmt(space.w_TypeError, "setting dictionary to a non-dict")
assert isinstance(w_dict, W_DictMultiObject)
w_olddict = self.getdict(space)
assert isinstance(w_olddict, W_DictMultiObject)
diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py
--- a/pypy/objspace/std/newformat.py
+++ b/pypy/objspace/std/newformat.py
@@ -64,8 +64,7 @@
else:
out = rstring.StringBuilder()
if not level:
- raise OperationError(space.w_ValueError,
- space.wrap("Recursion depth exceeded"))
+ raise oefmt(space.w_ValueError, "Recursion depth exceeded")
level -= 1
s = self.template
return self._do_build_string(start, end, level, out, s)
@@ -83,14 +82,12 @@
markup_follows = True
if c == "}":
if at_end or s[i] != "}":
- raise OperationError(space.w_ValueError,
- space.wrap("Single '}'"))
+ raise oefmt(space.w_ValueError, "Single '}'")
i += 1
markup_follows = False
if c == "{":
if at_end:
- raise OperationError(space.w_ValueError,
- space.wrap("Single '{'"))
+ raise oefmt(space.w_ValueError, "Single '{'")
if s[i] == "{":
i += 1
markup_follows = False
@@ -122,8 +119,7 @@
break
i += 1
if nested:
- raise OperationError(space.w_ValueError,
- space.wrap("Unmatched '{'"))
+ raise oefmt(space.w_ValueError, "Unmatched '{'")
rendered = self._render_field(field_start, i, recursive, level)
out.append(rendered)
i += 1
@@ -145,16 +141,15 @@
if c == "!":
i += 1
if i == end:
- w_msg = self.space.wrap("expected conversion")
- raise OperationError(self.space.w_ValueError, w_msg)
+ raise oefmt(self.space.w_ValueError,
+ "expected conversion")
conversion = s[i]
i += 1
if i < end:
if s[i] != ':':
- w_msg = self.space.wrap("expected ':' after"
- " format specifier")
- raise OperationError(self.space.w_ValueError,
- w_msg)
+ raise oefmt(self.space.w_ValueError,
+ "expected ':' after format "
+ "specifier")
i += 1
else:
conversion = None
@@ -190,13 +185,12 @@
if use_numeric:
if self.auto_numbering_state == ANS_MANUAL:
if empty:
- msg = "switching from manual to automatic numbering"
- raise OperationError(space.w_ValueError,
- space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "switching from manual to automatic "
+ "numbering")
elif not empty:
- msg = "switching from automatic to manual numbering"
- raise OperationError(space.w_ValueError,
- space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "switching from automatic to manual numbering")
if empty:
index = self.auto_numbering
self.auto_numbering += 1
@@ -219,8 +213,7 @@
try:
w_arg = self.args[index]
except IndexError:
- w_msg = space.wrap("index out of range")
- raise OperationError(space.w_IndexError, w_msg)
+ raise oefmt(space.w_IndexError, "out of range")
return self._resolve_lookups(w_arg, name, i, end)
@jit.unroll_safe
@@ -239,8 +232,8 @@
break
i += 1
if start == i:
- w_msg = space.wrap("Empty attribute in format string")
- raise OperationError(space.w_ValueError, w_msg)
+ raise oefmt(space.w_ValueError,
+ "Empty attribute in format string")
w_attr = space.wrap(name[start:i])
if w_obj is not None:
w_obj = space.getattr(w_obj, w_attr)
@@ -258,8 +251,7 @@
break
i += 1
if not got_bracket:
- raise OperationError(space.w_ValueError,
- space.wrap("Missing ']'"))
+ raise oefmt(space.w_ValueError, "Missing ']'")
index, reached = _parse_int(self.space, name, start, i)
if index != -1 and reached == i:
w_item = space.wrap(index)
@@ -272,8 +264,8 @@
self.parser_list_w.append(space.newtuple([
space.w_False, w_item]))
else:
- msg = "Only '[' and '.' may follow ']'"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Only '[' and '.' may follow ']'")
return w_obj
def formatter_field_name_split(self):
@@ -316,8 +308,7 @@
from pypy.objspace.std.unicodeobject import ascii_from_object
return ascii_from_object(space, w_obj)
else:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("invalid conversion"))
+ raise oefmt(space.w_ValueError, "invalid conversion")
def _render_field(self, start, end, recursive, level):
name, conversion, spec_start = self._parse_field(start, end)
@@ -476,19 +467,17 @@
i += 1
self._precision, i = _parse_int(self.space, spec, i, length)
if self._precision == -1:
- raise OperationError(space.w_ValueError,
- space.wrap("no precision given"))
+ raise oefmt(space.w_ValueError, "no precision given")
if length - i > 1:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid format spec"))
+ raise oefmt(space.w_ValueError, "invalid format spec")
if length - i == 1:
presentation_type = spec[i]
if self.is_unicode:
try:
the_type = spec[i].encode("ascii")[0]
except UnicodeEncodeError:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid presentation type"))
+ raise oefmt(space.w_ValueError,
+ "invalid presentation type")
else:
the_type = presentation_type
i += 1
@@ -507,8 +496,7 @@
# ok
pass
else:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid type with ','"))
+ raise oefmt(space.w_ValueError, "invalid type with ','")
return False
def _calc_padding(self, string, length):
@@ -551,9 +539,8 @@
return rstring.StringBuilder()
def _unknown_presentation(self, tp):
- msg = "unknown presentation for %s: '%s'"
- w_msg = self.space.wrap(msg % (tp, self._type))
- raise OperationError(self.space.w_ValueError, w_msg)
+ raise oefmt(self.space.w_ValueError,
+ "unknown presentation for %s: '%s'", tp, self._type)
def format_string(self, w_string):
space = self.space
@@ -565,14 +552,16 @@
if self._type != "s":
self._unknown_presentation("string")
if self._sign != "\0":
- msg = "Sign not allowed in string format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Sign not allowed in string format specifier")
if self._alternate:
- msg = "Alternate form (#) not allowed in string format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Alternate form (#) not allowed in string format "
+ "specifier")
if self._align == "=":
- msg = "'=' alignment not allowed in string format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "'=' alignment not allowed in string format "
+ "specifier")
length = len(string)
precision = self._precision
if precision != -1 and length >= precision:
@@ -770,14 +759,14 @@
def _format_int_or_long(self, w_num, kind):
space = self.space
if self._precision != -1:
- msg = "precision not allowed in integer type"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "precision not allowed in integer type")
sign_char = "\0"
tp = self._type
if tp == "c":
if self._sign != "\0":
- msg = "sign not allowed with 'c' presentation type"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "sign not allowed with 'c' presentation type")
value = space.int_w(w_num)
if self.is_unicode:
result = runicode.UNICHR(value)
@@ -1000,13 +989,14 @@
default_precision = 6
if self._align == "=":
# '=' alignment is invalid
- msg = ("'=' alignment flag is not allowed in"
- " complex format specifier")
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "'=' alignment flag is not allowed in complex "
+ "format specifier")
if self._fill_char == "0":
- #zero padding is invalid
- msg = "Zero padding is not allowed in complex format specifier"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ # zero padding is invalid
+ raise oefmt(space.w_ValueError,
+ "Zero padding is not allowed in complex format "
+ "specifier")
if self._alternate:
flags |= rfloat.DTSF_ALT
diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py
--- a/pypy/objspace/std/objectobject.py
+++ b/pypy/objspace/std/objectobject.py
@@ -195,8 +195,7 @@
elif space.isinstance_w(w_format_spec, space.w_str):
w_as_str = space.str(w_obj)
else:
- msg = "format_spec must be a string"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "format_spec must be a string")
if space.len_w(w_format_spec) > 0:
msg = "object.__format__ with a non-empty format string is deprecated"
space.warn(space.wrap(msg), space.w_DeprecationWarning)
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -532,8 +532,7 @@
w_tup = self.call_function(w_indices, w_length)
l_w = self.unpackiterable(w_tup)
if not len(l_w) == 3:
- raise OperationError(self.w_ValueError,
- self.wrap("Expected tuple of length 3"))
+ raise oefmt(self.w_ValueError, "Expected tuple of length 3")
return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2])
_DescrOperation_is_true = is_true
@@ -646,13 +645,12 @@
def _type_issubtype(self, w_sub, w_type):
if isinstance(w_sub, W_TypeObject) and isinstance(w_type, W_TypeObject):
return self.wrap(w_sub.issubtype(w_type))
- raise OperationError(self.w_TypeError, self.wrap("need type objects"))
+ raise oefmt(self.w_TypeError, "need type objects")
@specialize.arg_or_var(2)
def _type_isinstance(self, w_inst, w_type):
if not isinstance(w_type, W_TypeObject):
- raise OperationError(self.w_TypeError,
- self.wrap("need type object"))
+ raise oefmt(self.w_TypeError, "need type object")
if is_annotation_constant(w_type):
cls = self._get_interplevel_cls(w_type)
if cls is not None:
diff --git a/pypy/objspace/std/proxyobject.py b/pypy/objspace/std/proxyobject.py
--- a/pypy/objspace/std/proxyobject.py
+++ b/pypy/objspace/std/proxyobject.py
@@ -1,7 +1,7 @@
""" transparent list implementation
"""
from pypy.interpreter import baseobjspace
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
def transparent_class(name, BaseCls):
@@ -20,8 +20,9 @@
return self.w_type
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("You cannot override __class__ for transparent proxies"))
+ raise oefmt(space.w_TypeError,
+ "You cannot override __class__ for transparent "
+ "proxies")
def getdictvalue(self, space, attr):
try:
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py
--- a/pypy/objspace/std/setobject.py
+++ b/pypy/objspace/std/setobject.py
@@ -1,6 +1,6 @@
from pypy.interpreter import gateway
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.signature import Signature
from pypy.interpreter.typedef import TypeDef
from pypy.objspace.std.bytesobject import W_BytesObject
@@ -820,8 +820,7 @@
return EmptyIteratorImplementation(self.space, self, w_set)
def popitem(self, w_set):
- raise OperationError(self.space.w_KeyError,
- self.space.wrap('pop from an empty set'))
+ raise oefmt(self.space.w_KeyError, "pop from an empty set")
class AbstractUnwrappedSetStrategy(object):
@@ -1178,8 +1177,7 @@
result = storage.popitem()
except KeyError:
# strategy may still be the same even if dict is empty
- raise OperationError(self.space.w_KeyError,
- self.space.wrap('pop from an empty set'))
+ raise oefmt(self.space.w_KeyError, "pop from an empty set")
return self.wrap(result[0])
@@ -1401,8 +1399,8 @@
return None
if self.len != self.setimplementation.length():
self.len = -1 # Make this error state sticky
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("set changed size during iteration"))
+ raise oefmt(self.space.w_RuntimeError,
+ "set changed size during iteration")
# look for the next entry
if self.pos < self.len:
result = self.next_entry()
@@ -1415,8 +1413,8 @@
# We try to explicitly look it up in the set.
if not self.setimplementation.has_key(result):
self.len = -1 # Make this error state sticky
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("dictionary changed during iteration"))
+ raise oefmt(self.space.w_RuntimeError,
+ "dictionary changed during iteration")
return result
# no more entries
self.setimplementation = None
diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py
--- a/pypy/objspace/std/sliceobject.py
+++ b/pypy/objspace/std/sliceobject.py
@@ -3,7 +3,7 @@
import sys
from pypy.interpreter import gateway
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import GetSetProperty, TypeDef
from rpython.rlib.objectmodel import specialize
from rpython.rlib import jit
@@ -29,8 +29,7 @@
else:
step = _eval_slice_index(space, w_slice.w_step)
if step == 0:
- raise OperationError(space.w_ValueError,
- space.wrap("slice step cannot be zero"))
+ raise oefmt(space.w_ValueError, "slice step cannot be zero")
if space.is_w(w_slice.w_start, space.w_None):
if step < 0:
start = length - 1
@@ -98,11 +97,9 @@
elif len(args_w) == 3:
w_start, w_stop, w_step = args_w
elif len(args_w) > 3:
- raise OperationError(space.w_TypeError,
- space.wrap("slice() takes at most 3 arguments"))
+ raise oefmt(space.w_TypeError, "slice() takes at most 3 arguments")
else:
- raise OperationError(space.w_TypeError,
- space.wrap("slice() takes at least 1 argument"))
+ raise oefmt(space.w_TypeError, "slice() takes at least 1 argument")
w_obj = space.allocate_instance(W_SliceObject, w_slicetype)
W_SliceObject.__init__(w_obj, w_start, w_stop, w_step)
return w_obj
@@ -166,8 +163,7 @@
def fget(space, w_obj):
from pypy.objspace.std.sliceobject import W_SliceObject
if not isinstance(w_obj, W_SliceObject):
- raise OperationError(space.w_TypeError,
- space.wrap("descriptor is for 'slice'"))
+ raise oefmt(space.w_TypeError, "descriptor is for 'slice'")
return getattr(w_obj, name)
return GetSetProperty(fget)
@@ -200,9 +196,9 @@
except OperationError as err:
if not err.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_TypeError,
- space.wrap("slice indices must be integers or "
- "None or have an __index__ method"))
+ raise oefmt(space.w_TypeError,
+ "slice indices must be integers or None or have an "
+ "__index__ method")
def adapt_lower_bound(space, size, w_index):
index = _eval_slice_index(space, w_index)
diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py
--- a/pypy/objspace/std/specialisedtupleobject.py
+++ b/pypy/objspace/std/specialisedtupleobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.objspace.std.tupleobject import W_AbstractTupleObject
from pypy.objspace.std.util import negate
from rpython.rlib.objectmodel import specialize
@@ -123,8 +123,7 @@
if typetuple[i] != object:
value = space.wrap(value)
return value
- raise OperationError(space.w_IndexError,
- space.wrap("tuple index out of range"))
+ raise oefmt(space.w_IndexError, "tuple index out of range")
cls.__name__ = ('W_SpecialisedTupleObject_' +
''.join([t.__name__[0] for t in typetuple]))
@@ -187,8 +186,7 @@
def specialized_zip_2_lists(space, w_list1, w_list2):
from pypy.objspace.std.listobject import W_ListObject
if type(w_list1) is not W_ListObject or type(w_list2) is not W_ListObject:
- raise OperationError(space.w_TypeError,
- space.wrap("expected two exact lists"))
+ raise oefmt(space.w_TypeError, "expected two exact lists")
if space.config.objspace.std.withspecialisedtuple:
intlist1 = w_list1.getitems_int()
diff --git a/pypy/objspace/std/transparent.py b/pypy/objspace/std/transparent.py
--- a/pypy/objspace/std/transparent.py
+++ b/pypy/objspace/std/transparent.py
@@ -49,7 +49,7 @@
Return something that looks like it is of type typ. Its behaviour is
completely controlled by the controller."""
if not space.is_true(space.callable(w_controller)):
- raise OperationError(space.w_TypeError, space.wrap("controller should be function"))
+ raise oefmt(space.w_TypeError, "controller should be function")
if isinstance(w_type, W_TypeObject):
if space.is_true(space.issubtype(w_type, space.gettypeobject(Function.typedef))):
@@ -65,7 +65,7 @@
if w_type.layout.typedef is space.w_object.layout.typedef:
return W_Transparent(space, w_type, w_controller)
else:
- raise OperationError(space.w_TypeError, space.wrap("type expected as first argument"))
+ raise oefmt(space.w_TypeError, "type expected as first argument")
w_lookup = w_type
for k, v in type_cache.cache:
if w_lookup == k:
diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py
--- a/pypy/objspace/std/tupleobject.py
+++ b/pypy/objspace/std/tupleobject.py
@@ -3,7 +3,7 @@
import sys
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import (
WrappedDefault, interp2app, interpindirect2app, unwrap_spec)
from pypy.interpreter.typedef import TypeDef
@@ -210,8 +210,7 @@
w_item = self.tolist()[i]
if space.eq_w(w_item, w_obj):
return space.wrap(i)
- raise OperationError(space.w_ValueError,
- space.wrap("tuple.index(x): x not in tuple"))
+ raise oefmt(space.w_ValueError, "tuple.index(x): x not in tuple")
W_AbstractTupleObject.typedef = TypeDef(
"tuple",
@@ -322,8 +321,7 @@
try:
return self.wrappeditems[index]
except IndexError:
- raise OperationError(space.w_IndexError,
- space.wrap("tuple index out of range"))
+ raise oefmt(space.w_IndexError, "tuple index out of range")
def wraptuple(space, list_w):
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -1,7 +1,7 @@
import weakref
from pypy.interpreter import gateway
from pypy.interpreter.baseobjspace import W_Root, SpaceCache
-from pypy.interpreter.error import oefmt, OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import (
Function, StaticMethod, ClassMethod, FunctionWithFixedCode)
from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\
diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py
--- a/pypy/tool/pytest/appsupport.py
+++ b/pypy/tool/pytest/appsupport.py
@@ -2,7 +2,7 @@
import py
from pypy.interpreter import gateway, pycode
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
try:
from _pytest.assertion.newinterpret import interpret
@@ -236,9 +236,8 @@
args_w, kwds_w = __args__.unpack()
if space.isinstance_w(w_expr, space.w_unicode):
if args_w:
- raise OperationError(space.w_TypeError,
- space.wrap("raises() takes no argument "
- "after a string expression"))
+ raise oefmt(space.w_TypeError,
+ "raises() takes no argument after a string expression")
expr = space.unwrap(w_expr)
source = py.code.Source(expr)
frame = space.getexecutioncontext().gettopframe()
@@ -268,8 +267,7 @@
if e.match(space, w_ExpectedException):
return _exc_info(space, e)
raise
- raise OperationError(space.w_AssertionError,
- space.wrap("DID NOT RAISE"))
+ raise oefmt(space.w_AssertionError, "DID NOT RAISE")
app_raises = gateway.interp2app_temp(pypyraises)
From pypy.commits at gmail.com Mon May 2 20:31:20 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 17:31:20 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge oefmt (18b5bfb) oefmt
pypy/module/_*
Message-ID: <5727f158.cbb81c0a.5d920.ffffd7ed@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84157:ceb5cc90b42e
Date: 2016-05-02 17:29 -0700
http://bitbucket.org/pypy/pypy/changeset/ceb5cc90b42e/
Log: merge oefmt (18b5bfb) oefmt pypy/module/_*
diff too long, truncating to 2000 out of 2996 lines
diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py
--- a/pypy/module/__builtin__/compiling.py
+++ b/pypy/module/__builtin__/compiling.py
@@ -3,7 +3,7 @@
"""
from pypy.interpreter.pycode import PyCode
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.astcompiler import consts, ast
from pypy.interpreter.gateway import unwrap_spec
@@ -26,8 +26,7 @@
if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST |
consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8 |
consts.PyCF_ACCEPT_NULL_BYTES):
- raise OperationError(space.w_ValueError,
- space.wrap("compile() unrecognized flags"))
+ raise oefmt(space.w_ValueError, "compile() unrecognized flags")
if not dont_inherit:
caller = ec.gettopframe_nohidden()
@@ -35,9 +34,8 @@
flags |= ec.compiler.getcodeflags(caller.getcode())
if mode not in ('exec', 'eval', 'single'):
- raise OperationError(space.w_ValueError,
- space.wrap("compile() arg 3 must be 'exec' "
- "or 'eval' or 'single'"))
+ raise oefmt(space.w_ValueError,
+ "compile() arg 3 must be 'exec' or 'eval' or 'single'")
if space.isinstance_w(w_source, space.gettypeobject(ast.W_AST.typedef)):
ast_node = ast.mod.from_object(space, w_source)
@@ -55,8 +53,8 @@
if not (flags & consts.PyCF_ACCEPT_NULL_BYTES):
if '\x00' in source:
- raise OperationError(space.w_TypeError, space.wrap(
- "compile() expected string without null bytes"))
+ raise oefmt(space.w_TypeError,
+ "compile() expected string without null bytes")
if flags & consts.PyCF_ONLY_AST:
node = ec.compiler.compile_to_ast(source, filename, mode, flags)
@@ -73,8 +71,6 @@
are dictionaries, defaulting to the current current globals and locals.
If only globals is given, locals defaults to it.
"""
- w = space.wrap
-
if (space.isinstance_w(w_code, space.w_str) or
space.isinstance_w(w_code, space.w_unicode)):
w_code = compile(space,
@@ -83,8 +79,8 @@
"", "eval")
if not isinstance(w_code, PyCode):
- raise OperationError(space.w_TypeError,
- w('eval() arg 1 must be a string or code object'))
+ raise oefmt(space.w_TypeError,
+ "eval() arg 1 must be a string or code object")
if space.is_none(w_globals):
caller = space.getexecutioncontext().gettopframe_nohidden()
diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py
--- a/pypy/module/__builtin__/descriptor.py
+++ b/pypy/module/__builtin__/descriptor.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import StaticMethod, ClassMethod
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import (TypeDef, interp_attrproperty_w,
@@ -67,9 +67,9 @@
raise
w_type = w_objtype
if not space.is_true(space.issubtype(w_type, w_starttype)):
- raise OperationError(space.w_TypeError,
- space.wrap("super(type, obj): "
- "obj must be an instance or subtype of type"))
+ raise oefmt(space.w_TypeError,
+ "super(type, obj): obj must be an instance or "
+ "subtype of type")
# XXX the details of how allocate_instance() should be used are not
# really well defined
w_result = space.allocate_instance(W_Super, w_subtype)
@@ -126,21 +126,18 @@
if space.is_w(w_obj, space.w_None):
return space.wrap(self)
if space.is_w(self.w_fget, space.w_None):
- raise OperationError(space.w_AttributeError, space.wrap(
- "unreadable attribute"))
+ raise oefmt(space.w_AttributeError, "unreadable attribute")
return space.call_function(self.w_fget, w_obj)
def set(self, space, w_obj, w_value):
if space.is_w(self.w_fset, space.w_None):
- raise OperationError(space.w_AttributeError, space.wrap(
- "can't set attribute"))
+ raise oefmt(space.w_AttributeError, "can't set attribute")
space.call_function(self.w_fset, w_obj, w_value)
return space.w_None
def delete(self, space, w_obj):
if space.is_w(self.w_fdel, space.w_None):
- raise OperationError(space.w_AttributeError, space.wrap(
- "can't delete attribute"))
+ raise oefmt(space.w_AttributeError, "can't delete attribute")
space.call_function(self.w_fdel, w_obj)
return space.w_None
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -5,7 +5,7 @@
import sys
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import TypeDef
from rpython.rlib import jit, rarithmetic
@@ -32,8 +32,7 @@
# hi-lo-1 = M-(-M-1)-1 = 2*M. Therefore unsigned long has enough
# precision to compute the RHS exactly.
if step == 0:
- raise OperationError(space.w_ValueError,
- space.wrap("step argument must not be zero"))
+ raise oefmt(space.w_ValueError, "step argument must not be zero")
elif step < 0:
lo, hi, step = hi, lo, -step
if lo < hi:
@@ -42,8 +41,7 @@
diff = uhi - ulo - 1
n = intmask(diff // r_uint(step) + 1)
if n < 0:
- raise OperationError(space.w_OverflowError,
- space.wrap("result has too many items"))
+ raise oefmt(space.w_OverflowError, "result has too many items")
else:
n = 0
return n
@@ -63,14 +61,14 @@
w_stop = w_y
if space.isinstance_w(w_stop, space.w_float):
- raise OperationError(space.w_TypeError,
- space.wrap("range() integer end argument expected, got float."))
+ raise oefmt(space.w_TypeError,
+ "range() integer end argument expected, got float.")
if space.isinstance_w(w_start, space.w_float):
- raise OperationError(space.w_TypeError,
- space.wrap("range() integer start argument expected, got float."))
+ raise oefmt(space.w_TypeError,
+ "range() integer start argument expected, got float.")
if space.isinstance_w(w_step, space.w_float):
- raise OperationError(space.w_TypeError,
- space.wrap("range() integer step argument expected, got float."))
+ raise oefmt(space.w_TypeError,
+ "range() integer step argument expected, got float.")
w_start = space.int(w_start)
w_stop = space.int(w_stop)
@@ -112,8 +110,7 @@
step = st = space.bigint_w(w_step)
if not step.tobool():
- raise OperationError(space.w_ValueError,
- space.wrap("step argument must not be zero"))
+ raise oefmt(space.w_ValueError, "step argument must not be zero")
elif step.sign < 0:
lo, hi, st = hi, lo, st.neg()
@@ -123,8 +120,7 @@
try:
howmany = n.toint()
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("result has too many items"))
+ raise oefmt(space.w_OverflowError, "result has too many items")
else:
howmany = 0
@@ -155,16 +151,18 @@
elif len(args_w):
w_sequence = args_w[0]
else:
- msg = "%s() expects at least one argument" % (implementation_of,)
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "%s() expects at least one argument",
+ implementation_of)
w_key = None
kwds = args.keywords
if kwds:
if kwds[0] == "key" and len(kwds) == 1:
w_key = args.keywords_w[0]
else:
- msg = "%s() got unexpected keyword argument" % (implementation_of,)
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "%s() got unexpected keyword argument",
+ implementation_of)
w_iter = space.iter(w_sequence)
w_type = space.type(w_iter)
@@ -191,8 +189,7 @@
w_max_item = w_item
w_max_val = w_compare_with
if w_max_item is None:
- msg = "arg is an empty sequence"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError, "arg is an empty sequence")
return w_max_item
if unroll:
min_max_impl = jit.unroll_safe(min_max_impl)
@@ -341,8 +338,8 @@
def __init__(self, space, w_sequence):
self.remaining = space.len_w(w_sequence) - 1
if space.lookup(w_sequence, "__getitem__") is None:
- msg = "reversed() argument must be a sequence"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "reversed() argument must be a sequence")
self.w_sequence = w_sequence
def descr___iter__(self, space):
@@ -439,8 +436,7 @@
i += len
if 0 <= i < len:
return space.wrap(self.start + i * self.step)
- raise OperationError(space.w_IndexError,
- space.wrap("xrange object index out of range"))
+ raise oefmt(space.w_IndexError, "xrange object index out of range")
def descr_iter(self):
if self.promote_step and self.step == 1:
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -32,8 +32,7 @@
if space.is_true(space.callable(w_metaclass)):
return space.call_function(w_metaclass, w_name,
w_bases, w_dict)
- raise OperationError(space.w_TypeError,
- space.wrap("base must be class"))
+ raise oefmt(space.w_TypeError, "base must be class")
return W_ClassObject(space, w_name, bases_w, w_dict)
@@ -58,28 +57,23 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__dict__ must be a dictionary object"))
+ raise oefmt(space.w_TypeError,
+ "__dict__ must be a dictionary object")
self.w_dict = w_dict
def setname(self, space, w_newname):
if not space.isinstance_w(w_newname, space.w_str):
- raise OperationError(space.w_TypeError,
- space.wrap("__name__ must be a string object")
- )
+ raise oefmt(space.w_TypeError, "__name__ must be a string object")
self.name = space.str_w(w_newname)
def setbases(self, space, w_bases):
if not space.isinstance_w(w_bases, space.w_tuple):
- raise OperationError(space.w_TypeError,
- space.wrap("__bases__ must be a tuple object")
- )
+ raise oefmt(space.w_TypeError, "__bases__ must be a tuple object")
bases_w = space.fixedview(w_bases)
for w_base in bases_w:
if not isinstance(w_base, W_ClassObject):
- raise OperationError(space.w_TypeError,
- space.wrap("__bases__ items must be classes"))
+ raise oefmt(space.w_TypeError,
+ "__bases__ items must be classes")
self.bases_w = bases_w
def is_subclass_of(self, other):
@@ -207,13 +201,9 @@
if w_init is not None:
w_result = space.call_args(w_init, __args__)
if not space.is_w(w_result, space.w_None):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__init__() should return None"))
+ raise oefmt(space.w_TypeError, "__init__() should return None")
elif __args__.arguments_w or __args__.keywords:
- raise OperationError(
- space.w_TypeError,
- space.wrap("this constructor takes no arguments"))
+ raise oefmt(space.w_TypeError, "this constructor takes no arguments")
return w_inst
W_ClassObject.typedef = TypeDef("classobj",
@@ -297,9 +287,7 @@
def descr_instance_new(space, w_type, w_class, w_dict=None):
# w_type is not used at all
if not isinstance(w_class, W_ClassObject):
- raise OperationError(
- space.w_TypeError,
- space.wrap("instance() first arg must be class"))
+ raise oefmt(space.w_TypeError, "instance() first arg must be class")
w_result = w_class.instantiate(space)
if not space.is_none(w_dict):
w_result.setdict(space, w_dict)
@@ -318,9 +306,7 @@
def set_oldstyle_class(self, space, w_class):
if w_class is None or not isinstance(w_class, W_ClassObject):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__class__ must be set to a class"))
+ raise oefmt(space.w_TypeError, "__class__ must be set to a class")
self.w_class = w_class
def getattr_from_class(self, space, name):
@@ -453,13 +439,9 @@
w_result = space.call_function(w_meth)
if space.isinstance_w(w_result, space.w_int):
if space.is_true(space.lt(w_result, space.wrap(0))):
- raise OperationError(
- space.w_ValueError,
- space.wrap("__len__() should return >= 0"))
+ raise oefmt(space.w_ValueError, "__len__() should return >= 0")
return w_result
- raise OperationError(
- space.w_TypeError,
- space.wrap("__len__() should return an int"))
+ raise oefmt(space.w_TypeError, "__len__() should return an int")
def descr_getitem(self, space, w_key):
w_meth = self.getattr(space, '__getitem__')
@@ -479,9 +461,7 @@
return space.call_function(w_meth)
w_meth = self.getattr(space, '__getitem__', False)
if w_meth is None:
- raise OperationError(
- space.w_TypeError,
- space.wrap("iteration over non-sequence"))
+ raise oefmt(space.w_TypeError, "iteration over non-sequence")
return space.newseqiter(self)
#XXX do I really need a next method? the old implementation had one, but I
# don't see the point
@@ -521,13 +501,10 @@
w_result = space.call_function(w_func)
if space.isinstance_w(w_result, space.w_int):
if space.is_true(space.lt(w_result, space.wrap(0))):
- raise OperationError(
- space.w_ValueError,
- space.wrap("__nonzero__() should return >= 0"))
+ raise oefmt(space.w_ValueError,
+ "__nonzero__() should return >= 0")
return w_result
- raise OperationError(
- space.w_TypeError,
- space.wrap("__nonzero__() should return an int"))
+ raise oefmt(space.w_TypeError, "__nonzero__() should return an int")
def descr_cmp(self, space, w_other): # do all the work here like CPython
w_a, w_b = _coerce_helper(space, self, w_other)
@@ -544,9 +521,8 @@
res = space.int_w(w_res)
except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__cmp__ must return int"))
+ raise oefmt(space.w_TypeError,
+ "__cmp__ must return int")
raise
if res > 0:
return space.wrap(1)
@@ -563,9 +539,8 @@
res = space.int_w(w_res)
except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__cmp__ must return int"))
+ raise oefmt(space.w_TypeError,
+ "__cmp__ must return int")
raise
if res < 0:
return space.wrap(1)
@@ -580,16 +555,13 @@
w_eq = self.getattr(space, '__eq__', False)
w_cmp = self.getattr(space, '__cmp__', False)
if w_eq is not None or w_cmp is not None:
- raise OperationError(space.w_TypeError,
- space.wrap("unhashable instance"))
+ raise oefmt(space.w_TypeError, "unhashable instance")
else:
return space.wrap(compute_identity_hash(self))
w_ret = space.call_function(w_func)
if (not space.isinstance_w(w_ret, space.w_int) and
not space.isinstance_w(w_ret, space.w_long)):
- raise OperationError(
- space.w_TypeError,
- space.wrap("__hash__ must return int or long"))
+ raise oefmt(space.w_TypeError, "__hash__ must return int or long")
return w_ret
def descr_int(self, space):
@@ -603,9 +575,7 @@
return space.int(w_truncated)
except OperationError:
# Raise a different error
- raise OperationError(
- space.w_TypeError,
- space.wrap("__trunc__ returned non-Integral"))
+ raise oefmt(space.w_TypeError, "__trunc__ returned non-Integral")
def descr_long(self, space):
w_func = self.getattr(space, '__long__', False)
@@ -617,9 +587,8 @@
w_func = self.getattr(space, '__index__', False)
if w_func is not None:
return space.call_function(w_func)
- raise OperationError(
- space.w_TypeError,
- space.wrap("object cannot be interpreted as an index"))
+ raise oefmt(space.w_TypeError,
+ "object cannot be interpreted as an index")
def descr_contains(self, space, w_obj):
w_func = self.getattr(space, '__contains__', False)
@@ -674,8 +643,7 @@
def descr_next(self, space):
w_func = self.getattr(space, 'next', False)
if w_func is None:
- raise OperationError(space.w_TypeError,
- space.wrap("instance has no next() method"))
+ raise oefmt(space.w_TypeError, "instance has no next() method")
return space.call_function(w_func)
def descr_del(self, space):
diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py
--- a/pypy/module/__builtin__/operation.py
+++ b/pypy/module/__builtin__/operation.py
@@ -3,7 +3,7 @@
"""
from pypy.interpreter import gateway
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from rpython.rlib.runicode import UNICHR
from rpython.rlib.rfloat import isnan, isinf, round_double
@@ -19,8 +19,7 @@
try:
char = __builtin__.chr(space.int_w(w_ascii))
except ValueError: # chr(out-of-range)
- raise OperationError(space.w_ValueError,
- space.wrap("character code not in range(256)"))
+ raise oefmt(space.w_ValueError, "character code not in range(256)")
return space.wrap(char)
@unwrap_spec(code=int)
@@ -30,8 +29,7 @@
try:
c = UNICHR(code)
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("unichr() arg out of range"))
+ raise oefmt(space.w_ValueError, "unichr() arg out of range")
return space.wrap(c)
def len(space, w_obj):
@@ -151,8 +149,8 @@
# finite x, and ndigits is not unreasonably large
z = round_double(number, ndigits)
if isinf(z):
- raise OperationError(space.w_OverflowError,
- space.wrap("rounded value too large to represent"))
+ raise oefmt(space.w_OverflowError,
+ "rounded value too large to represent")
return space.wrap(z)
# ____________________________________________________________
@@ -227,7 +225,7 @@
same value."""
if space.is_w(space.type(w_str), space.w_str):
return space.new_interned_w_str(w_str)
- raise OperationError(space.w_TypeError, space.wrap("intern() argument must be string."))
+ raise oefmt(space.w_TypeError, "intern() argument must be string.")
def callable(space, w_object):
"""Check whether the object appears to be callable (i.e., some kind of
diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py
--- a/pypy/module/__pypy__/interp_builders.py
+++ b/pypy/module/__pypy__/interp_builders.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef
from rpython.rlib.rstring import UnicodeBuilder, StringBuilder
@@ -16,8 +16,8 @@
def _check_done(self, space):
if self.builder is None:
- raise OperationError(space.w_ValueError, space.wrap(
- "Can't operate on a built builder"))
+ raise oefmt(space.w_ValueError,
+ "Can't operate on a built builder")
@unwrap_spec(size=int)
def descr__new__(space, w_subtype, size=-1):
@@ -32,8 +32,7 @@
def descr_append_slice(self, space, s, start, end):
self._check_done(space)
if not 0 <= start <= end <= len(s):
- raise OperationError(space.w_ValueError, space.wrap(
- "bad start/stop"))
+ raise oefmt(space.w_ValueError, "bad start/stop")
self.builder.append_slice(s, start, end)
def descr_build(self, space):
@@ -44,8 +43,7 @@
def descr_len(self, space):
if self.builder is None:
- raise OperationError(space.w_ValueError, space.wrap(
- "no length of built builder"))
+ raise oefmt(space.w_ValueError, "no length of built builder")
return space.wrap(self.builder.getlength())
W_Builder.__name__ = "W_%s" % name
diff --git a/pypy/module/__pypy__/interp_identitydict.py b/pypy/module/__pypy__/interp_identitydict.py
--- a/pypy/module/__pypy__/interp_identitydict.py
+++ b/pypy/module/__pypy__/interp_identitydict.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.baseobjspace import W_Root
@@ -35,9 +35,9 @@
raise OperationError(space.w_KeyError, w_key)
def descr_iter(self, space):
- raise OperationError(space.w_TypeError,
- space.wrap("'identity_dict' object does not support iteration; "
- "iterate over x.keys()"))
+ raise oefmt(space.w_TypeError,
+ "'identity_dict' object does not support iteration; "
+ "iterate over x.keys()")
def get(self, space, w_key, w_default=None):
if w_default is None:
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError, oefmt, wrap_oserror
+from pypy.interpreter.error import oefmt, wrap_oserror
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.pycode import CodeHookCache
from pypy.interpreter.pyframe import PyFrame
@@ -74,8 +74,8 @@
def lookup_special(space, w_obj, meth):
"""Lookup up a special method on an object."""
if space.is_oldstyle_instance(w_obj):
- w_msg = space.wrap("this doesn't do what you want on old-style classes")
- raise OperationError(space.w_TypeError, w_msg)
+ raise oefmt(space.w_TypeError,
+ "this doesn't do what you want on old-style classes")
w_descr = space.lookup(w_obj, meth)
if w_descr is None:
return space.w_None
@@ -97,8 +97,7 @@
elif isinstance(w_obj, W_BaseSetObject):
name = w_obj.strategy.__class__.__name__
else:
- raise OperationError(space.w_TypeError,
- space.wrap("expecting dict or list or set object"))
+ raise oefmt(space.w_TypeError, "expecting dict or list or set object")
return space.wrap(name)
@@ -119,8 +118,7 @@
@unwrap_spec(sizehint=int)
def resizelist_hint(space, w_iterable, sizehint):
if not isinstance(w_iterable, W_ListObject):
- raise OperationError(space.w_TypeError,
- space.wrap("arg 1 must be a 'list'"))
+ raise oefmt(space.w_TypeError, "arg 1 must be a 'list'")
w_iterable._resize_hint(sizehint)
@unwrap_spec(sizehint=int)
@@ -181,8 +179,7 @@
elif space.is_w(space.type(w_obj), space.w_str):
jit.promote_string(space.str_w(w_obj))
elif space.is_w(space.type(w_obj), space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- "promoting unicode unsupported"))
+ raise oefmt(space.w_TypeError, "promoting unicode unsupported")
else:
jit.promote(w_obj)
return w_obj
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -88,8 +88,7 @@
ctype = self.ctype
if not isinstance(ctype, W_CTypeFunc):
space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("expected a function ctype"))
+ raise oefmt(space.w_TypeError, "expected a function ctype")
return ctype
def hide_object(self):
@@ -219,8 +218,8 @@
invoke_callback,
unique_id)
if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK:
- raise OperationError(space.w_SystemError,
- space.wrap("libffi failed to build this callback"))
+ raise oefmt(space.w_SystemError,
+ "libffi failed to build this callback")
def py_invoke(self, ll_res, ll_args):
jitdriver1.jit_merge_point(callback=self,
@@ -234,9 +233,9 @@
space = fresult.space
if isinstance(fresult, W_CTypeVoid):
if not space.is_w(w_res, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("callback with the return type 'void'"
- " must return None"))
+ raise oefmt(space.w_TypeError,
+ "callback with the return type 'void' must return "
+ "None")
return
#
small_result = encode_result_for_libffi and fresult.size < SIZE_OF_FFI_ARG
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -113,8 +113,9 @@
if requires_ordering:
if (isinstance(self.ctype, W_CTypePrimitive) or
isinstance(w_other.ctype, W_CTypePrimitive)):
- raise OperationError(space.w_TypeError, space.wrap(
- "cannot do comparison on a primitive cdata"))
+ raise oefmt(space.w_TypeError,
+ "cannot do comparison on a primitive "
+ "cdata")
ptr1 = rffi.cast(lltype.Unsigned, ptr1)
ptr2 = rffi.cast(lltype.Unsigned, ptr2)
result = op(ptr1, ptr2)
@@ -175,22 +176,18 @@
space = self.space
#
if space.is_w(w_slice.w_start, space.w_None):
- raise OperationError(space.w_IndexError,
- space.wrap("slice start must be specified"))
+ raise oefmt(space.w_IndexError, "slice start must be specified")
start = space.int_w(w_slice.w_start)
#
if space.is_w(w_slice.w_stop, space.w_None):
- raise OperationError(space.w_IndexError,
- space.wrap("slice stop must be specified"))
+ raise oefmt(space.w_IndexError, "slice stop must be specified")
stop = space.int_w(w_slice.w_stop)
#
if not space.is_w(w_slice.w_step, space.w_None):
- raise OperationError(space.w_IndexError,
- space.wrap("slice with step not supported"))
+ raise oefmt(space.w_IndexError, "slice with step not supported")
#
if start > stop:
- raise OperationError(space.w_IndexError,
- space.wrap("slice start > stop"))
+ raise oefmt(space.w_IndexError, "slice start > stop")
#
ctype = self.ctype._check_slice_index(self, start, stop)
assert isinstance(ctype, W_CTypePointer)
diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py
--- a/pypy/module/_cffi_backend/ctypearray.py
+++ b/pypy/module/_cffi_backend/ctypearray.py
@@ -40,8 +40,8 @@
try:
datasize = ovfcheck(length * self.ctitem.size)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
else:
length = self.length
#
@@ -55,8 +55,7 @@
def _check_subscript_index(self, w_cdata, i):
space = self.space
if i < 0:
- raise OperationError(space.w_IndexError,
- space.wrap("negative index not supported"))
+ raise oefmt(space.w_IndexError, "negative index not supported")
if i >= w_cdata.get_array_length():
raise oefmt(space.w_IndexError,
"index too large for cdata '%s' (expected %d < %d)",
@@ -66,8 +65,7 @@
def _check_slice_index(self, w_cdata, start, stop):
space = self.space
if start < 0:
- raise OperationError(space.w_IndexError,
- space.wrap("negative index not supported"))
+ raise oefmt(space.w_IndexError, "negative index not supported")
if stop > w_cdata.get_array_length():
raise oefmt(space.w_IndexError,
"index too large (expected %d <= %d)",
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -471,5 +471,5 @@
# call libffi's ffi_prep_cif() function
res = jit_libffi.jit_ffi_prep_cif(rawmem)
if res != clibffi.FFI_OK:
- raise OperationError(space.w_SystemError,
- space.wrap("libffi failed to build this function type"))
+ raise oefmt(space.w_SystemError,
+ "libffi failed to build this function type")
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -185,26 +185,24 @@
except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_TypeError,
- space.wrap("field name or array index expected"))
+ raise oefmt(space.w_TypeError,
+ "field name or array index expected")
return self.typeoffsetof_index(index)
else:
return self.typeoffsetof_field(fieldname, following)
def typeoffsetof_field(self, fieldname, following):
- space = self.space
- msg = "with a field name argument, expected a struct or union ctype"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(self.space.w_TypeError,
+ "with a field name argument, expected a struct or union "
+ "ctype")
def typeoffsetof_index(self, index):
- space = self.space
- msg = "with an integer argument, expected an array or pointer ctype"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(self.space.w_TypeError,
+ "with an integer argument, expected an array or pointer "
+ "ctype")
def rawaddressof(self, cdata, offset):
- space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("expected a pointer ctype"))
+ raise oefmt(self.space.w_TypeError, "expected a pointer ctype")
def call(self, funcaddr, args_w):
space = self.space
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -289,8 +289,8 @@
try:
datasize = ovfcheck(length * itemsize)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
result = lltype.malloc(rffi.CCHARP.TO, datasize,
flavor='raw', zero=True)
try:
@@ -322,13 +322,12 @@
space = self.space
ctitem = self.ctitem
if ctitem.size < 0:
- raise OperationError(space.w_TypeError,
- space.wrap("pointer to opaque"))
+ raise oefmt(space.w_TypeError, "pointer to opaque")
try:
offset = ovfcheck(index * ctitem.size)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array offset would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array offset would overflow a ssize_t")
return ctitem, offset
def rawaddressof(self, cdata, offset):
@@ -341,9 +340,8 @@
ptr = rffi.ptradd(ptr, offset)
return cdataobj.W_CData(space, ptr, self)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("expected a cdata struct/union/array/pointer"
- " object"))
+ raise oefmt(space.w_TypeError,
+ "expected a cdata struct/union/array/pointer object")
def _fget(self, attrchar):
if attrchar == 'i': # item
@@ -377,8 +375,7 @@
if w_fileobj.cffi_fileobj is None:
fd = w_fileobj.direct_fileno()
if fd < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("file has no OS file descriptor"))
+ raise oefmt(space.w_ValueError, "file has no OS file descriptor")
try:
w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode)
except OSError as e:
diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py
--- a/pypy/module/_cffi_backend/ctypestruct.py
+++ b/pypy/module/_cffi_backend/ctypestruct.py
@@ -94,8 +94,7 @@
except KeyError:
raise OperationError(space.w_KeyError, space.wrap(fieldname))
if cfield.bitshift >= 0:
- raise OperationError(space.w_TypeError,
- space.wrap("not supported for bitfields"))
+ raise oefmt(space.w_TypeError, "not supported for bitfields")
return (cfield.ctype, cfield.offset)
def _copy_from_same(self, cdata, w_ob):
@@ -243,8 +242,8 @@
varsize = ovfcheck(itemsize * varsizelength)
size = ovfcheck(self.offset + varsize)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
assert size >= 0
return max(size, optvarsize)
# if 'value' was only an integer, get_new_array_length() returns
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -44,8 +44,7 @@
raise oefmt(space.w_ValueError,
"ctype '%s' is of unknown size", w_obj.name)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("expected a 'cdata' or 'ctype' object"))
+ raise oefmt(space.w_TypeError, "expected a 'cdata' or 'ctype' object")
return space.wrap(size)
@unwrap_spec(w_ctype=ctypeobj.W_CType)
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -1,6 +1,6 @@
from __future__ import with_statement
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib import jit
from rpython.rlib.objectmodel import specialize
@@ -285,8 +285,7 @@
try:
return _standard_object_as_bool(space, w_io)
except _NotStandardObject:
- raise OperationError(space.w_TypeError,
- space.wrap("integer/float expected"))
+ raise oefmt(space.w_TypeError, "integer/float expected")
# ____________________________________________________________
@@ -300,8 +299,7 @@
else:
explicitlength = space.getindex_w(w_value, space.w_OverflowError)
if explicitlength < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("negative array length"))
+ raise oefmt(space.w_ValueError, "negative array length")
return (space.w_None, explicitlength)
# ____________________________________________________________
diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
--- a/pypy/module/_cffi_backend/newtype.py
+++ b/pypy/module/_cffi_backend/newtype.py
@@ -181,16 +181,14 @@
else:
length = space.getindex_w(w_length, space.w_OverflowError)
if length < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("negative array length"))
+ raise oefmt(space.w_ValueError, "negative array length")
return _new_array_type(space, w_ctptr, length)
@jit.elidable
def _new_array_type(space, w_ctptr, length):
_setup_wref(rweakref.has_weakref_support())
if not isinstance(w_ctptr, ctypeptr.W_CTypePointer):
- raise OperationError(space.w_TypeError,
- space.wrap("first arg must be a pointer ctype"))
+ raise oefmt(space.w_TypeError, "first arg must be a pointer ctype")
arrays = w_ctptr._array_types
if arrays is None:
arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray)
@@ -212,8 +210,8 @@
try:
arraysize = ovfcheck(length * ctitem.size)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
extra = '[%d]' % length
#
ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra)
@@ -290,9 +288,9 @@
sflags = complete_sflags(sflags)
if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion)
or w_ctype.size >= 0):
- raise OperationError(space.w_TypeError,
- space.wrap("first arg must be a non-initialized"
- " struct or union ctype"))
+ raise oefmt(space.w_TypeError,
+ "first arg must be a non-initialized struct or union "
+ "ctype")
is_union = isinstance(w_ctype, ctypestruct.W_CTypeUnion)
alignment = 1
@@ -310,8 +308,7 @@
w_field = fields_w[i]
field_w = space.fixedview(w_field)
if not (2 <= len(field_w) <= 4):
- raise OperationError(space.w_TypeError,
- space.wrap("bad field descr"))
+ raise oefmt(space.w_TypeError, "bad field descr")
fname = space.str_w(field_w[0])
ftype = space.interp_w(ctypeobj.W_CType, field_w[1])
fbitsize = -1
@@ -564,14 +561,13 @@
enumerators_w = space.fixedview(w_enumerators)
enumvalues_w = space.fixedview(w_enumvalues)
if len(enumerators_w) != len(enumvalues_w):
- raise OperationError(space.w_ValueError,
- space.wrap("tuple args must have the same size"))
+ raise oefmt(space.w_ValueError, "tuple args must have the same size")
enumerators = [space.str_w(w) for w in enumerators_w]
#
if (not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveSigned) and
not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveUnsigned)):
- raise OperationError(space.w_TypeError,
- space.wrap("expected a primitive signed or unsigned base type"))
+ raise oefmt(space.w_TypeError,
+ "expected a primitive signed or unsigned base type")
#
lvalue = lltype.malloc(rffi.CCHARP.TO, w_basectype.size, flavor='raw')
try:
@@ -601,8 +597,8 @@
fargs = []
for w_farg in space.fixedview(w_fargs):
if not isinstance(w_farg, ctypeobj.W_CType):
- raise OperationError(space.w_TypeError,
- space.wrap("first arg must be a tuple of ctype objects"))
+ raise oefmt(space.w_TypeError,
+ "first arg must be a tuple of ctype objects")
if isinstance(w_farg, ctypearray.W_CTypeArray):
w_farg = w_farg.ctptr
fargs.append(w_farg)
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -119,9 +119,7 @@
if space.is_true(space.callable(w_search_function)):
state.codec_search_path.append(w_search_function)
else:
- raise OperationError(
- space.w_TypeError,
- space.wrap("argument must be callable"))
+ raise oefmt(space.w_TypeError, "argument must be callable")
@unwrap_spec(encoding=str)
@@ -148,19 +146,17 @@
space.call_function(w_import, space.wrap("encodings"))
state.codec_need_encodings = False
if len(state.codec_search_path) == 0:
- raise OperationError(
- space.w_LookupError,
- space.wrap("no codec search functions registered: "
- "can't find encoding"))
+ raise oefmt(space.w_LookupError,
+ "no codec search functions registered: can't find "
+ "encoding")
for w_search in state.codec_search_path:
w_result = space.call_function(w_search,
space.wrap(normalized_encoding))
if not space.is_w(w_result, space.w_None):
if not (space.isinstance_w(w_result, space.w_tuple) and
space.len_w(w_result) == 4):
- raise OperationError(
- space.w_TypeError,
- space.wrap("codec search functions must return 4-tuples"))
+ raise oefmt(space.w_TypeError,
+ "codec search functions must return 4-tuples")
else:
state.codec_search_cache[normalized_encoding] = w_result
state.modified()
@@ -178,22 +174,19 @@
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- "wrong exception"))
+ raise oefmt(space.w_TypeError, "wrong exception")
delta = space.int_w(w_end) - space.int_w(w_start)
if delta < 0 or not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- "wrong exception"))
+ raise oefmt(space.w_TypeError, "wrong exception")
def strict_errors(space, w_exc):
check_exception(space, w_exc)
if space.isinstance_w(w_exc, space.w_BaseException):
raise OperationError(space.type(w_exc), w_exc)
else:
- raise OperationError(space.w_TypeError, space.wrap(
- "codec must pass exception instance"))
+ raise oefmt(space.w_TypeError, "codec must pass exception instance")
def ignore_errors(space, w_exc):
check_exception(space, w_exc)
@@ -350,9 +343,8 @@
if space.is_true(w_decoder):
w_res = space.call_function(w_decoder, w_obj, space.wrap(errors))
if (not space.isinstance_w(w_res, space.w_tuple) or space.len_w(w_res) != 2):
- raise OperationError(
- space.w_TypeError,
- space.wrap("encoder must return a tuple (object, integer)"))
+ raise oefmt(space.w_TypeError,
+ "encoder must return a tuple (object, integer)")
return space.getitem(w_res, space.wrap(0))
else:
assert 0, "XXX, what to do here?"
@@ -371,9 +363,7 @@
if space.is_true(space.callable(w_handler)):
state.codec_error_registry[errors] = w_handler
else:
- raise OperationError(
- space.w_TypeError,
- space.wrap("handler must be callable"))
+ raise oefmt(space.w_TypeError, "handler must be callable")
# ____________________________________________________________
# delegation to runicode
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -4,7 +4,7 @@
from pypy.interpreter.typedef import TypeDef, make_weakref_descr
from pypy.interpreter.typedef import GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib.debug import check_nonneg
@@ -76,9 +76,8 @@
def checklock(self, lock):
if lock is not self.lock:
- raise OperationError(
- self.space.w_RuntimeError,
- self.space.wrap("deque mutated during iteration"))
+ raise oefmt(self.space.w_RuntimeError,
+ "deque mutated during iteration")
def init(self, w_iterable=None, w_maxlen=None):
space = self.space
@@ -200,8 +199,7 @@
def pop(self):
"Remove and return the rightmost element."
if self.len == 0:
- msg = "pop from an empty deque"
- raise OperationError(self.space.w_IndexError, self.space.wrap(msg))
+ raise oefmt(self.space.w_IndexError, "pop from an empty deque")
self.len -= 1
ri = self.rightindex
w_obj = self.rightblock.data[ri]
@@ -224,8 +222,7 @@
def popleft(self):
"Remove and return the leftmost element."
if self.len == 0:
- msg = "pop from an empty deque"
- raise OperationError(self.space.w_IndexError, self.space.wrap(msg))
+ raise oefmt(self.space.w_IndexError, "pop from an empty deque")
self.len -= 1
li = self.leftindex
w_obj = self.leftblock.data[li]
@@ -263,8 +260,7 @@
if index >= BLOCKLEN:
block = block.rightlink
index = 0
- raise OperationError(space.w_ValueError,
- space.wrap("deque.remove(x): x not in deque"))
+ raise oefmt(space.w_ValueError, "deque.remove(x): x not in deque")
def reverse(self):
"Reverse *IN PLACE*."
@@ -371,8 +367,7 @@
b, i = self.locate(start)
return b.data[i]
else:
- raise OperationError(space.w_TypeError,
- space.wrap("deque[:] is not supported"))
+ raise oefmt(space.w_TypeError, "deque[:] is not supported")
def setitem(self, w_index, w_newobj):
space = self.space
@@ -381,8 +376,7 @@
b, i = self.locate(start)
b.data[i] = w_newobj
else:
- raise OperationError(space.w_TypeError,
- space.wrap("deque[:] is not supported"))
+ raise oefmt(space.w_TypeError, "deque[:] is not supported")
def delitem(self, w_index):
space = self.space
@@ -390,8 +384,7 @@
if step == 0: # index only
self.del_item(start)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("deque[:] is not supported"))
+ raise oefmt(space.w_TypeError, "deque[:] is not supported")
def copy(self):
"Return a shallow copy of a deque."
@@ -520,13 +513,12 @@
return self.space.wrap(self.counter)
def next(self):
+ space = self.space
if self.lock is not self.deque.lock:
self.counter = 0
- raise OperationError(
- self.space.w_RuntimeError,
- self.space.wrap("deque mutated during iteration"))
+ raise oefmt(space.w_RuntimeError, "deque mutated during iteration")
if self.counter == 0:
- raise OperationError(self.space.w_StopIteration, self.space.w_None)
+ raise OperationError(space.w_StopIteration, space.w_None)
self.counter -= 1
ri = self.index
w_x = self.block.data[ri]
@@ -563,13 +555,12 @@
return self.space.wrap(self.counter)
def next(self):
+ space = self.space
if self.lock is not self.deque.lock:
self.counter = 0
- raise OperationError(
- self.space.w_RuntimeError,
- self.space.wrap("deque mutated during iteration"))
+ raise oefmt(space.w_RuntimeError, "deque mutated during iteration")
if self.counter == 0:
- raise OperationError(self.space.w_StopIteration, self.space.w_None)
+ raise OperationError(space.w_StopIteration, space.w_None)
self.counter -= 1
ri = self.index
w_x = self.block.data[ri]
diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py
--- a/pypy/module/_csv/interp_csv.py
+++ b/pypy/module/_csv/interp_csv.py
@@ -106,18 +106,17 @@
# validate options
if not (0 <= tmp_quoting < 4):
- raise OperationError(space.w_TypeError,
- space.wrap('bad "quoting" value'))
+ raise oefmt(space.w_TypeError, 'bad "quoting" value')
if dialect.delimiter == '\0':
- raise OperationError(space.w_TypeError,
- space.wrap('"delimiter" must be a 1-character string'))
+ raise oefmt(space.w_TypeError,
+ '"delimiter" must be a 1-character string')
if space.is_w(w_quotechar, space.w_None) and w_quoting is None:
tmp_quoting = QUOTE_NONE
if tmp_quoting != QUOTE_NONE and dialect.quotechar == '\0':
- raise OperationError(space.w_TypeError,
- space.wrap('quotechar must be set if quoting enabled'))
+ raise oefmt(space.w_TypeError,
+ "quotechar must be set if quoting enabled")
dialect.quoting = tmp_quoting
return dialect
diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py
--- a/pypy/module/_csv/interp_reader.py
+++ b/pypy/module/_csv/interp_reader.py
@@ -1,6 +1,6 @@
from rpython.rlib.rstring import StringBuilder
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.typedef import TypeDef, interp2app
from pypy.interpreter.typedef import interp_attrproperty_w, interp_attrproperty
@@ -27,10 +27,9 @@
def error(self, msg):
space = self.space
- msg = 'line %d: %s' % (self.line_num, msg)
w_module = space.getbuiltinmodule('_csv')
w_error = space.getattr(w_module, space.wrap('Error'))
- raise OperationError(w_error, space.wrap(msg))
+ raise oefmt(w_error, "line %d: %s", self.line_num, msg)
error._dont_inline_ = True
def add_char(self, field_builder, c):
diff --git a/pypy/module/_demo/demo.py b/pypy/module/_demo/demo.py
--- a/pypy/module/_demo/demo.py
+++ b/pypy/module/_demo/demo.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty
@@ -22,8 +22,7 @@
def measuretime(space, repetitions, w_callable):
if repetitions <= 0:
w_DemoError = get(space, 'DemoError')
- msg = "repetition count must be > 0"
- raise OperationError(w_DemoError, space.wrap(msg))
+ raise oefmt(w_DemoError, "repetition count must be > 0")
starttime = time(0)
for i in range(repetitions):
space.call_function(w_callable)
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -94,19 +94,16 @@
def check_closed(self):
if self.stream is None:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("I/O operation on closed file")
- )
+ raise oefmt(self.space.w_ValueError,
+ "I/O operation on closed file")
def check_readable(self):
if not self.readable:
- raise OperationError(self.space.w_IOError, self.space.wrap(
- "File not open for reading"))
+ raise oefmt(self.space.w_IOError, "File not open for reading")
def check_writable(self):
if not self.writable:
- raise OperationError(self.space.w_IOError, self.space.wrap(
- "File not open for writing"))
+ raise oefmt(self.space.w_IOError, "File not open for writing")
def getstream(self):
"""Return self.stream or raise an app-level ValueError if missing
@@ -512,8 +509,9 @@
else:
line = w_line.charbuf_w(space)
except BufferInterfaceNotFound:
- raise OperationError(space.w_TypeError, space.wrap(
- "writelines() argument must be a sequence of strings"))
+ raise oefmt(space.w_TypeError,
+ "writelines() argument must be a sequence of "
+ "strings")
else:
lines[i] = space.wrap(line)
for w_line in lines:
diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py
--- a/pypy/module/_file/interp_stream.py
+++ b/pypy/module/_file/interp_stream.py
@@ -3,7 +3,7 @@
from rpython.rlib import streamio
from rpython.rlib.streamio import StreamErrors
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.baseobjspace import ObjSpace, W_Root, CannotHaveLock
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
@@ -58,14 +58,12 @@
def lock(self):
if not self._try_acquire_lock():
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("stream lock already held"))
+ raise oefmt(self.space.w_RuntimeError, "stream lock already held")
def unlock(self):
me = self.space.getexecutioncontext() # used as thread ident
if self.slockowner is not me:
- raise OperationError(self.space.w_RuntimeError,
- self.space.wrap("stream lock is not held"))
+ raise oefmt(self.space.w_RuntimeError, "stream lock is not held")
self._release_lock()
def _cleanup_(self):
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -7,7 +7,7 @@
from rpython.tool.sourcetools import func_renamer
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, interp2app
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.module.thread.os_lock import Lock
@@ -85,8 +85,7 @@
def digest_type_by_name(self, space):
digest_type = ropenssl.EVP_get_digestbyname(self.name)
if not digest_type:
- raise OperationError(space.w_ValueError,
- space.wrap("unknown hash function"))
+ raise oefmt(space.w_ValueError, "unknown hash function")
return digest_type
def descr_repr(self, space):
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -42,8 +42,7 @@
## self.lock.free()
self.lock = space.allocate_lock()
self.owner = 0
- self.operr = OperationError(space.w_RuntimeError,
- space.wrap("reentrant call"))
+ self.operr = oefmt(space.w_RuntimeError, "reentrant call")
def __enter__(self):
if not self.lock.acquire(False):
@@ -91,8 +90,7 @@
w_data = space.call_method(self, "read", space.wrap(length))
if not space.isinstance_w(w_data, space.w_str):
- raise OperationError(space.w_TypeError, space.wrap(
- "read() should return bytes"))
+ raise oefmt(space.w_TypeError, "read() should return bytes")
data = space.str_w(w_data)
rwbuffer.setslice(0, data)
return space.wrap(len(data))
@@ -157,8 +155,8 @@
def _init(self, space):
if self.buffer_size <= 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "buffer size must be strictly positive"))
+ raise oefmt(space.w_ValueError,
+ "buffer size must be strictly positive")
self.buffer = ['\0'] * self.buffer_size
@@ -171,11 +169,10 @@
def _check_init(self, space):
if self.state == STATE_ZERO:
- raise OperationError(space.w_ValueError, space.wrap(
- "I/O operation on uninitialized object"))
+ raise oefmt(space.w_ValueError,
+ "I/O operation on uninitialized object")
elif self.state == STATE_DETACHED:
- raise OperationError(space.w_ValueError, space.wrap(
- "raw stream has been detached"))
+ raise oefmt(space.w_ValueError, "raw stream has been detached")
def _check_closed(self, space, message=None):
self._check_init(space)
@@ -185,8 +182,8 @@
w_pos = space.call_method(self.w_raw, "tell")
pos = space.r_longlong_w(w_pos)
if pos < 0:
- raise OperationError(space.w_IOError, space.wrap(
- "raw stream returned invalid position"))
+ raise oefmt(space.w_IOError,
+ "raw stream returned invalid position")
self.abs_pos = pos
return pos
@@ -297,8 +294,8 @@
space.wrap(pos), space.wrap(whence))
pos = space.r_longlong_w(w_pos)
if pos < 0:
- raise OperationError(space.w_IOError, space.wrap(
- "Raw stream returned invalid position"))
+ raise oefmt(space.w_IOError,
+ "Raw stream returned invalid position")
self.abs_pos = pos
return pos
@@ -363,8 +360,7 @@
written = space.getindex_w(w_written, space.w_IOError)
if not 0 <= written <= len(data):
- raise OperationError(space.w_IOError, space.wrap(
- "raw write() returned invalid length"))
+ raise oefmt(space.w_IOError, "raw write() returned invalid length")
if self.abs_pos != -1:
self.abs_pos += written
return written
@@ -417,8 +413,8 @@
with self.lock:
res = self._read_generic(space, size)
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "read length must be positive or -1"))
+ raise oefmt(space.w_ValueError,
+ "read length must be positive or -1")
return space.wrap(res)
@unwrap_spec(size=int)
@@ -454,8 +450,7 @@
self._check_closed(space, "read of closed file")
if size < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "read length must be positive"))
+ raise oefmt(space.w_ValueError, "read length must be positive")
if size == 0:
return space.wrap("")
@@ -537,9 +532,9 @@
raise BlockingIOError()
size = space.int_w(w_size)
if size < 0 or size > length:
- raise OperationError(space.w_IOError, space.wrap(
- "raw readinto() returned invalid length %d "
- "(should have been between 0 and %d)" % (size, length)))
+ raise oefmt(space.w_IOError,
+ "raw readinto() returned invalid length %d (should "
+ "have been between 0 and %d)", size, length)
if self.abs_pos != -1:
self.abs_pos += size
return size
diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py
--- a/pypy/module/_io/interp_bytesio.py
+++ b/pypy/module/_io/interp_bytesio.py
@@ -70,8 +70,7 @@
size = space.r_longlong_w(w_size)
if size < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative size value"))
+ raise oefmt(space.w_ValueError, "negative size value")
self.truncate(size)
if size == pos:
@@ -94,16 +93,13 @@
if whence == 0:
if pos < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative seek value"))
+ raise oefmt(space.w_ValueError, "negative seek value")
elif whence == 1:
if pos > sys.maxint - self.tell():
- raise OperationError(space.w_OverflowError, space.wrap(
- "new position too large"))
+ raise oefmt(space.w_OverflowError, "new position too large")
elif whence == 2:
if pos > sys.maxint - self.getsize():
- raise OperationError(space.w_OverflowError, space.wrap(
- "new position too large"))
+ raise oefmt(space.w_OverflowError, "new position too large")
else:
raise oefmt(space.w_ValueError,
"whence must be between 0 and 2, not %d", whence)
@@ -148,8 +144,8 @@
self.write_w(space, w_content)
pos = space.int_w(w_pos)
if pos < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "position value cannot be negative"))
+ raise oefmt(space.w_ValueError,
+ "position value cannot be negative")
self.seek(pos)
if not space.is_w(w_dict, space.w_None):
space.call_method(self.getdict(space), "update", w_dict)
diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py
--- a/pypy/module/_io/interp_fileio.py
+++ b/pypy/module/_io/interp_fileio.py
@@ -1,6 +1,7 @@
from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec
-from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2
+from pypy.interpreter.error import (
+ OperationError, oefmt, wrap_oserror, wrap_oserror2)
from rpython.rlib.rarithmetic import r_longlong
from rpython.rlib.rstring import StringBuilder
from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC
@@ -12,8 +13,7 @@
def fget(space, obj):
w_value = getattr(obj, name)
if w_value is None:
- raise OperationError(space.w_AttributeError,
- space.wrap(name))
+ raise OperationError(space.w_AttributeError, space.wrap(name))
else:
return w_value
def fset(space, obj, w_value):
@@ -21,8 +21,7 @@
def fdel(space, obj):
w_value = getattr(obj, name)
if w_value is None:
- raise OperationError(space.w_AttributeError,
- space.wrap(name))
+ raise OperationError(space.w_AttributeError, space.wrap(name))
setattr(obj, name, None)
return GetSetProperty(fget, fset, fdel, cls=cls, doc=doc)
@@ -32,8 +31,8 @@
O_APPEND = getattr(os, "O_APPEND", 0)
def _bad_mode(space):
- raise OperationError(space.w_ValueError, space.wrap(
- "Must have exactly one of read/write/append mode"))
+ raise oefmt(space.w_ValueError,
+ "Must have exactly one of read/write/append mode")
def decode_mode(space, mode):
flags = 0
@@ -70,8 +69,7 @@
readable = writable = True
plus = True
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid mode: %s" % (mode,)))
+ raise oefmt(space.w_ValueError, "invalid mode: %s", mode)
if not rwa:
_bad_mode(space)
@@ -133,8 +131,8 @@
@unwrap_spec(mode=str, closefd=int)
def descr_init(self, space, w_name, mode='r', closefd=True):
if space.isinstance_w(w_name, space.w_float):
- raise OperationError(space.w_TypeError, space.wrap(
- "integer argument expected, got float"))
+ raise oefmt(space.w_TypeError,
+ "integer argument expected, got float")
fd = -1
try:
@@ -143,8 +141,7 @@
pass
else:
if fd < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative file descriptor"))
+ raise oefmt(space.w_ValueError, "negative file descriptor")
self.readable, self.writable, self.appending, flags = decode_mode(space, mode)
@@ -162,8 +159,8 @@
else:
self.closefd = True
if not closefd:
- raise OperationError(space.w_ValueError, space.wrap(
- "Cannot use closefd=False with file name"))
+ raise oefmt(space.w_ValueError,
+ "Cannot use closefd=False with file name")
from pypy.module.posix.interp_posix import (
dispatch_filename, rposix)
@@ -219,15 +216,11 @@
def _check_readable(self, space):
if not self.readable:
- raise OperationError(
- space.w_ValueError,
- space.wrap("file not open for reading"))
+ raise oefmt(space.w_ValueError, "file not open for reading")
def _check_writable(self, space):
if not self.writable:
- raise OperationError(
- space.w_ValueError,
- space.wrap("file not open for writing"))
+ raise oefmt(space.w_ValueError, "file not open for writing")
def _close(self, space):
if self.fd < 0:
diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py
--- a/pypy/module/_io/interp_io.py
+++ b/pypy/module/_io/interp_io.py
@@ -89,25 +89,19 @@
rawmode += "+"
if universal and (writing or appending):
- raise OperationError(space.w_ValueError,
- space.wrap("can't use U and writing mode at once")
- )
+ raise oefmt(space.w_ValueError, "can't use U and writing mode at once")
if text and binary:
- raise OperationError(space.w_ValueError,
- space.wrap("can't have text and binary mode at once")
- )
+ raise oefmt(space.w_ValueError,
+ "can't have text and binary mode at once")
if reading + writing + appending > 1:
- raise OperationError(space.w_ValueError,
- space.wrap("must have exactly one of read/write/append mode")
- )
+ raise oefmt(space.w_ValueError,
+ "must have exactly one of read/write/append mode")
if binary and encoding is not None:
- raise OperationError(space.w_ValueError,
- space.wrap("binary mode doesn't take an encoding argument")
- )
+ raise oefmt(space.w_ValueError,
+ "binary mode doesn't take an encoding argument")
if binary and newline is not None:
- raise OperationError(space.w_ValueError,
- space.wrap("binary mode doesn't take a newline argument")
- )
+ raise oefmt(space.w_ValueError,
+ "binary mode doesn't take a newline argument")
w_raw = space.call_function(
space.gettypefor(W_FileIO), w_file, space.wrap(rawmode), space.wrap(closefd)
)
@@ -132,15 +126,11 @@
buffering = st.st_blksize
if buffering < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid buffering size")
- )
+ raise oefmt(space.w_ValueError, "invalid buffering size")
if buffering == 0:
if not binary:
- raise OperationError(space.w_ValueError,
- space.wrap("can't have unbuffered text I/O")
- )
+ raise oefmt(space.w_ValueError, "can't have unbuffered text I/O")
return w_raw
if updating:
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -36,23 +36,17 @@
# May be called with any object
def check_readable_w(space, w_obj):
if not space.is_true(space.call_method(w_obj, 'readable')):
- raise OperationError(
- space.w_IOError,
- space.wrap("file or stream is not readable"))
+ raise oefmt(space.w_IOError, "file or stream is not readable")
# May be called with any object
def check_writable_w(space, w_obj):
if not space.is_true(space.call_method(w_obj, 'writable')):
- raise OperationError(
- space.w_IOError,
- space.wrap("file or stream is not writable"))
+ raise oefmt(space.w_IOError, "file or stream is not writable")
# May be called with any object
def check_seekable_w(space, w_obj):
if not space.is_true(space.call_method(w_obj, 'seekable')):
- raise OperationError(
- space.w_IOError,
- space.wrap("file or stream is not seekable"))
+ raise oefmt(space.w_IOError, "file or stream is not seekable")
class W_IOBase(W_Root):
@@ -129,9 +123,7 @@
def flush_w(self, space):
if self._CLOSED():
- raise OperationError(
- space.w_ValueError,
- space.wrap("I/O operation on closed file"))
+ raise oefmt(space.w_ValueError, "I/O operation on closed file")
def seek_w(self, space, w_offset, w_whence=None):
self._unsupportedoperation(space, "seek")
@@ -349,8 +341,7 @@
break
if not space.isinstance_w(w_data, space.w_str):
- raise OperationError(space.w_TypeError, space.wrap(
- "read() should return bytes"))
+ raise oefmt(space.w_TypeError, "read() should return bytes")
data = space.str_w(w_data)
if not data:
break
diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py
--- a/pypy/module/_io/interp_stringio.py
+++ b/pypy/module/_io/interp_stringio.py
@@ -89,9 +89,8 @@
self.buf = list(initval)
pos = space.getindex_w(w_pos, space.w_TypeError)
if pos < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("position value cannot be negative")
- )
+ raise oefmt(space.w_ValueError,
+ "position value cannot be negative")
self.pos = pos
if not space.is_w(w_dict, space.w_None):
if not space.isinstance_w(w_dict, space.w_dict):
@@ -203,9 +202,7 @@
elif mode == 0 and pos < 0:
raise oefmt(space.w_ValueError, "negative seek position: %d", pos)
elif mode != 0 and pos != 0:
- raise OperationError(space.w_IOError,
- space.wrap("Can't do nonzero cur-relative seeks")
- )
+ raise oefmt(space.w_IOError, "Can't do nonzero cur-relative seeks")
# XXX: this makes almost no sense, but its how CPython does it.
if mode == 1:
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -59,8 +59,8 @@
@unwrap_spec(final=int)
def decode_w(self, space, w_input, final=False):
if self.w_decoder is None:
- raise OperationError(space.w_ValueError, space.wrap(
- "IncrementalNewlineDecoder.__init__ not called"))
+ raise oefmt(space.w_ValueError,
+ "IncrementalNewlineDecoder.__init__ not called")
# decode input (with the eventual \r from a previous pass)
if not space.is_w(self.w_decoder, space.w_None):
@@ -70,8 +70,8 @@
w_output = w_input
if not space.isinstance_w(w_output, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- "decoder should return a string result"))
+ raise oefmt(space.w_TypeError,
+ "decoder should return a string result")
output = space.unicode_w(w_output)
output_len = len(output)
@@ -287,8 +287,7 @@
if space.isinstance_w(w_encoding, space.w_str):
return w_encoding
- raise OperationError(space.w_IOError, space.wrap(
- "could not determine default encoding"))
+ raise oefmt(space.w_IOError, "could not determine default encoding")
class PositionCookie(object):
def __init__(self, bigint):
@@ -377,8 +376,8 @@
newline = space.unicode_w(w_newline)
if newline and newline not in (u'\n', u'\r\n', u'\r'):
r = space.str_w(space.repr(w_newline))
- raise OperationError(space.w_ValueError, space.wrap(
- "illegal newline value: %s" % (r,)))
+ raise oefmt(space.w_ValueError,
+ "illegal newline value: %s", r)
self.line_buffering = line_buffering
@@ -429,13 +428,13 @@
def _check_init(self, space):
if self.state == STATE_ZERO:
- raise OperationError(space.w_ValueError, space.wrap(
- "I/O operation on uninitialized object"))
+ raise oefmt(space.w_ValueError,
+ "I/O operation on uninitialized object")
def _check_attached(self, space):
if self.state == STATE_DETACHED:
- raise OperationError(space.w_ValueError, space.wrap(
- "underlying buffer has been detached"))
+ raise oefmt(space.w_ValueError,
+ "underlying buffer has been detached")
self._check_init(space)
def _check_closed(self, space, message=None):
@@ -548,7 +547,7 @@
remain buffered in the decoder, yet to be converted."""
if not self.w_decoder:
- raise OperationError(space.w_IOError, space.wrap("not readable"))
+ raise oefmt(space.w_IOError, "not readable")
if self.telling:
# To prepare for tell(), we need to snapshot a point in the file
@@ -602,7 +601,7 @@
self._check_attached(space)
self._check_closed(space)
if not self.w_decoder:
- raise OperationError(space.w_IOError, space.wrap("not readable"))
+ raise oefmt(space.w_IOError, "not readable")
size = convert_size(space, w_size)
self._writeflush(space)
@@ -741,11 +740,11 @@
self._check_closed(space)
if not self.w_encoder:
- raise OperationError(space.w_IOError, space.wrap("not writable"))
+ raise oefmt(space.w_IOError, "not writable")
if not space.isinstance_w(w_text, space.w_unicode):
- msg = "unicode argument expected, got '%T'"
- raise oefmt(space.w_TypeError, msg, w_text)
+ raise oefmt(space.w_TypeError,
+ "unicode argument expected, got '%T'", w_text)
text = space.unicode_w(w_text)
textlen = len(text)
@@ -845,14 +844,13 @@
self._check_attached(space)
if not self.seekable:
- raise OperationError(space.w_IOError, space.wrap(
- "underlying stream is not seekable"))
+ raise oefmt(space.w_IOError, "underlying stream is not seekable")
if whence == 1:
# seek relative to current position
if not space.is_true(space.eq(w_pos, space.wrap(0))):
- raise OperationError(space.w_IOError, space.wrap(
- "can't do nonzero cur-relative seeks"))
+ raise oefmt(space.w_IOError,
+ "can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to sync the
# underlying buffer with the current position.
w_pos = space.call_method(self, "tell")
@@ -860,8 +858,8 @@
elif whence == 2:
# seek relative to end of file
if not space.is_true(space.eq(w_pos, space.wrap(0))):
- raise OperationError(space.w_IOError, space.wrap(
- "can't do nonzero end-relative seeks"))
+ raise oefmt(space.w_IOError,
+ "can't do nonzero end-relative seeks")
space.call_method(self, "flush")
self._set_decoded_chars(None)
self.snapshot = None
@@ -871,13 +869,14 @@
w_pos, space.wrap(whence))
elif whence != 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid whence (%d, should be 0, 1 or 2)" % (whence,)))
+ raise oefmt(space.w_ValueError,
+ "invalid whence (%d, should be 0, 1 or 2)",
+ whence)
if space.is_true(space.lt(w_pos, space.wrap(0))):
r = space.str_w(space.repr(w_pos))
- raise OperationError(space.w_ValueError, space.wrap(
- "negative seek position %s" % (r,)))
+ raise oefmt(space.w_ValueError,
+ "negative seek position %s", r)
space.call_method(self, "flush")
@@ -914,8 +913,8 @@
# Skip chars_to_skip of the decoded characters
if len(self.decoded_chars) < cookie.chars_to_skip:
- raise OperationError(space.w_IOError, space.wrap(
- "can't restore logical file position"))
+ raise oefmt(space.w_IOError,
+ "can't restore logical file position")
self.decoded_chars_used = cookie.chars_to_skip
else:
self.snapshot = PositionSnapshot(cookie.dec_flags, "")
@@ -930,12 +929,11 @@
self._check_closed(space)
if not self.seekable:
- raise OperationError(space.w_IOError, space.wrap(
- "underlying stream is not seekable"))
+ raise oefmt(space.w_IOError, "underlying stream is not seekable")
if not self.telling:
- raise OperationError(space.w_IOError, space.wrap(
- "telling position disabled by next() call"))
+ raise oefmt(space.w_IOError,
+ "telling position disabled by next() call")
self._writeflush(space)
space.call_method(self, "flush")
@@ -1008,8 +1006,8 @@
cookie.need_eof = 1
if chars_decoded < chars_to_skip:
- raise OperationError(space.w_IOError, space.wrap(
- "can't reconstruct logical file position"))
+ raise oefmt(space.w_IOError,
+ "can't reconstruct logical file position")
finally:
space.call_method(self.w_decoder, "setstate", w_saved_state)
@@ -1025,9 +1023,8 @@
self._check_attached(space)
size = space.int_w(w_size)
if size <= 0:
- raise OperationError(space.w_ValueError,
- space.wrap("a strictly positive integer is required")
- )
+ raise oefmt(space.w_ValueError,
+ "a strictly positive integer is required")
self.chunk_size = size
W_TextIOWrapper.typedef = TypeDef(
diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py
--- a/pypy/module/_locale/interp_locale.py
+++ b/pypy/module/_locale/interp_locale.py
@@ -1,7 +1,7 @@
from rpython.rlib import rposix
from rpython.rlib.rarithmetic import intmask
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from rpython.rlib import rlocale
@@ -186,8 +186,7 @@
try:
return space.wrap(rlocale.nl_langinfo(key))
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("unsupported langinfo constant"))
+ raise oefmt(space.w_ValueError, "unsupported langinfo constant")
#___________________________________________________________________
# HAVE_LIBINTL dependence
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py
--- a/pypy/module/_lsprof/interp_lsprof.py
+++ b/pypy/module/_lsprof/interp_lsprof.py
@@ -1,7 +1,7 @@
import py
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import Method, Function
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import (TypeDef, GetSetProperty,
@@ -418,9 +418,9 @@
def getstats(self, space):
if self.w_callable is None:
if self.is_enabled:
- raise OperationError(space.w_RuntimeError,
- space.wrap("Profiler instance must be disabled "
- "before getting the stats"))
+ raise oefmt(space.w_RuntimeError,
+ "Profiler instance must be disabled before "
+ "getting the stats")
if self.total_timestamp:
factor = self.total_real_time / float(self.total_timestamp)
else:
diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py
--- a/pypy/module/_multibytecodec/interp_multibytecodec.py
+++ b/pypy/module/_multibytecodec/interp_multibytecodec.py
@@ -1,7 +1,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.module._multibytecodec import c_codecs
from pypy.module._codecs.interp_codecs import CodecState
@@ -57,8 +57,7 @@
try:
codec = c_codecs.getcodec(name)
except KeyError:
- raise OperationError(space.w_LookupError,
- space.wrap("no such codec is supported."))
From pypy.commits at gmail.com Mon May 2 20:52:43 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 17:52:43 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: merge default (oefmt pypy/module/_*)
Message-ID: <5727f65b.a272c20a.e4a91.36f1@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84158:72ab4cdc6bd2
Date: 2016-05-02 17:47 -0700
http://bitbucket.org/pypy/pypy/changeset/72ab4cdc6bd2/
Log: merge default (oefmt pypy/module/_*)
diff too long, truncating to 2000 out of 2441 lines
diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py
--- a/pypy/module/__builtin__/compiling.py
+++ b/pypy/module/__builtin__/compiling.py
@@ -3,7 +3,7 @@
"""
from pypy.interpreter.pycode import PyCode
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.astcompiler import consts, ast
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.argument import Arguments
@@ -30,8 +30,7 @@
if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST |
consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8 |
consts.PyCF_ACCEPT_NULL_BYTES):
- raise OperationError(space.w_ValueError,
- space.wrap("compile() unrecognized flags"))
+ raise oefmt(space.w_ValueError, "compile() unrecognized flags")
if not dont_inherit:
caller = ec.gettopframe_nohidden()
@@ -39,9 +38,8 @@
flags |= ec.compiler.getcodeflags(caller.getcode())
if mode not in ('exec', 'eval', 'single'):
- raise OperationError(
- space.w_ValueError,
- space.wrap("compile() arg 3 must be 'exec', 'eval' or 'single'"))
+ raise oefmt(space.w_ValueError,
+ "compile() arg 3 must be 'exec', 'eval' or 'single'")
if space.isinstance_w(w_source, space.gettypeobject(ast.W_AST.typedef)):
ast_node = ast.mod.from_object(space, w_source)
diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py
--- a/pypy/module/__builtin__/descriptor.py
+++ b/pypy/module/__builtin__/descriptor.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import StaticMethod, ClassMethod
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import (
@@ -100,9 +100,9 @@
raise
w_type = w_objtype
if not space.is_true(space.issubtype(w_type, w_starttype)):
- raise OperationError(space.w_TypeError,
- space.wrap("super(type, obj): "
- "obj must be an instance or subtype of type"))
+ raise oefmt(space.w_TypeError,
+ "super(type, obj): obj must be an instance or "
+ "subtype of type")
# XXX the details of how allocate_instance() should be used are not
# really well defined
w_result = space.allocate_instance(W_Super, w_subtype)
@@ -159,21 +159,18 @@
if space.is_w(w_obj, space.w_None):
return space.wrap(self)
if space.is_w(self.w_fget, space.w_None):
- raise OperationError(space.w_AttributeError, space.wrap(
- "unreadable attribute"))
+ raise oefmt(space.w_AttributeError, "unreadable attribute")
return space.call_function(self.w_fget, w_obj)
def set(self, space, w_obj, w_value):
if space.is_w(self.w_fset, space.w_None):
- raise OperationError(space.w_AttributeError, space.wrap(
- "can't set attribute"))
+ raise oefmt(space.w_AttributeError, "can't set attribute")
space.call_function(self.w_fset, w_obj, w_value)
return space.w_None
def delete(self, space, w_obj):
if space.is_w(self.w_fdel, space.w_None):
- raise OperationError(space.w_AttributeError, space.wrap(
- "can't delete attribute"))
+ raise oefmt(space.w_AttributeError, "can't delete attribute")
space.call_function(self.w_fdel, w_obj)
return space.w_None
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -61,8 +61,7 @@
else:
w_step = space.index(w_slice.w_step)
if space.is_true(space.eq(w_step, w_0)):
- raise OperationError(space.w_ValueError,
- space.wrap("slice step cannot be zero"))
+ raise oefmt(space.w_ValueError, "slice step cannot be zero")
negative_step = space.is_true(space.lt(w_step, w_0))
if space.is_w(w_slice.w_start, space.w_None):
if negative_step:
@@ -124,16 +123,18 @@
elif len(args_w):
w_sequence = args_w[0]
else:
- msg = "%s() expects at least one argument" % (implementation_of,)
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "%s() expects at least one argument",
+ implementation_of)
w_key = None
kwds = args.keywords
if kwds:
if kwds[0] == "key" and len(kwds) == 1:
w_key = args.keywords_w[0]
else:
- msg = "%s() got unexpected keyword argument" % (implementation_of,)
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "%s() got unexpected keyword argument",
+ implementation_of)
w_iter = space.iter(w_sequence)
w_type = space.type(w_iter)
@@ -160,8 +161,7 @@
w_max_item = w_item
w_max_val = w_compare_with
if w_max_item is None:
- msg = "arg is an empty sequence"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError, "arg is an empty sequence")
return w_max_item
if unroll:
min_max_impl = jit.unroll_safe(min_max_impl)
@@ -297,8 +297,8 @@
def __init__(self, space, w_sequence):
self.remaining = space.len_w(w_sequence) - 1
if space.lookup(w_sequence, "__getitem__") is None:
- msg = "reversed() argument must be a sequence"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "reversed() argument must be a sequence")
self.w_sequence = w_sequence
@staticmethod
@@ -419,8 +419,7 @@
w_index = space.add(w_index, self.w_length)
if (space.is_true(space.ge(w_index, self.w_length)) or
space.is_true(space.lt(w_index, w_zero))):
- raise OperationError(space.w_IndexError, space.wrap(
- "range object index out of range"))
+ raise oefmt(space.w_IndexError, "range object index out of range")
return self._compute_item0(space, w_index)
def _compute_slice(self, space, w_slice):
diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py
--- a/pypy/module/__builtin__/operation.py
+++ b/pypy/module/__builtin__/operation.py
@@ -28,8 +28,7 @@
try:
c = UNICHR(code)
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("chr() arg out of range"))
+ raise oefmt(space.w_ValueError, "chr() arg out of range")
return space.wrap(c)
def len(space, w_obj):
diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py
--- a/pypy/module/__pypy__/interp_builders.py
+++ b/pypy/module/__pypy__/interp_builders.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef
from rpython.rlib.rstring import UnicodeBuilder, StringBuilder
@@ -16,8 +16,8 @@
def _check_done(self, space):
if self.builder is None:
- raise OperationError(space.w_ValueError, space.wrap(
- "Can't operate on a built builder"))
+ raise oefmt(space.w_ValueError,
+ "Can't operate on a built builder")
@unwrap_spec(size=int)
def descr__new__(space, w_subtype, size=-1):
@@ -32,8 +32,7 @@
def descr_append_slice(self, space, s, start, end):
self._check_done(space)
if not 0 <= start <= end <= len(s):
- raise OperationError(space.w_ValueError, space.wrap(
- "bad start/stop"))
+ raise oefmt(space.w_ValueError, "bad start/stop")
self.builder.append_slice(s, start, end)
def descr_build(self, space):
@@ -47,8 +46,7 @@
def descr_len(self, space):
if self.builder is None:
- raise OperationError(space.w_ValueError, space.wrap(
- "no length of built builder"))
+ raise oefmt(space.w_ValueError, "no length of built builder")
return space.wrap(self.builder.getlength())
W_Builder.__name__ = "W_%s" % name
diff --git a/pypy/module/__pypy__/interp_identitydict.py b/pypy/module/__pypy__/interp_identitydict.py
--- a/pypy/module/__pypy__/interp_identitydict.py
+++ b/pypy/module/__pypy__/interp_identitydict.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.baseobjspace import W_Root
@@ -35,9 +35,9 @@
raise OperationError(space.w_KeyError, w_key)
def descr_iter(self, space):
- raise OperationError(space.w_TypeError,
- space.wrap("'identity_dict' object does not support iteration; "
- "iterate over x.keys()"))
+ raise oefmt(space.w_TypeError,
+ "'identity_dict' object does not support iteration; "
+ "iterate over x.keys()")
def get(self, space, w_key, w_default=None):
if w_default is None:
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -87,8 +87,7 @@
elif isinstance(w_obj, W_BaseSetObject):
name = w_obj.strategy.__class__.__name__
else:
- raise OperationError(space.w_TypeError,
- space.wrap("expecting dict or list or set object"))
+ raise oefmt(space.w_TypeError, "expecting dict or list or set object")
return space.wrap(name)
@@ -102,8 +101,7 @@
@unwrap_spec(sizehint=int)
def resizelist_hint(space, w_iterable, sizehint):
if not isinstance(w_iterable, W_ListObject):
- raise OperationError(space.w_TypeError,
- space.wrap("arg 1 must be a 'list'"))
+ raise oefmt(space.w_TypeError, "arg 1 must be a 'list'")
w_iterable._resize_hint(sizehint)
@unwrap_spec(sizehint=int)
@@ -160,8 +158,7 @@
elif space.is_w(space.type(w_obj), space.w_str):
jit.promote_string(space.str_w(w_obj))
elif space.is_w(space.type(w_obj), space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- "promoting unicode unsupported"))
+ raise oefmt(space.w_TypeError, "promoting unicode unsupported")
else:
jit.promote(w_obj)
return w_obj
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -88,8 +88,7 @@
ctype = self.ctype
if not isinstance(ctype, W_CTypeFunc):
space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("expected a function ctype"))
+ raise oefmt(space.w_TypeError, "expected a function ctype")
return ctype
def hide_object(self):
@@ -219,8 +218,8 @@
invoke_callback,
unique_id)
if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK:
- raise OperationError(space.w_SystemError,
- space.wrap("libffi failed to build this callback"))
+ raise oefmt(space.w_SystemError,
+ "libffi failed to build this callback")
def py_invoke(self, ll_res, ll_args):
jitdriver1.jit_merge_point(callback=self,
@@ -234,9 +233,9 @@
space = fresult.space
if isinstance(fresult, W_CTypeVoid):
if not space.is_w(w_res, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("callback with the return type 'void'"
- " must return None"))
+ raise oefmt(space.w_TypeError,
+ "callback with the return type 'void' must return "
+ "None")
return
#
small_result = encode_result_for_libffi and fresult.size < SIZE_OF_FFI_ARG
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -113,8 +113,9 @@
if requires_ordering:
if (isinstance(self.ctype, W_CTypePrimitive) or
isinstance(w_other.ctype, W_CTypePrimitive)):
- raise OperationError(space.w_TypeError, space.wrap(
- "cannot do comparison on a primitive cdata"))
+ raise oefmt(space.w_TypeError,
+ "cannot do comparison on a primitive "
+ "cdata")
ptr1 = rffi.cast(lltype.Unsigned, ptr1)
ptr2 = rffi.cast(lltype.Unsigned, ptr2)
result = op(ptr1, ptr2)
@@ -175,22 +176,18 @@
space = self.space
#
if space.is_w(w_slice.w_start, space.w_None):
- raise OperationError(space.w_IndexError,
- space.wrap("slice start must be specified"))
+ raise oefmt(space.w_IndexError, "slice start must be specified")
start = space.int_w(w_slice.w_start)
#
if space.is_w(w_slice.w_stop, space.w_None):
- raise OperationError(space.w_IndexError,
- space.wrap("slice stop must be specified"))
+ raise oefmt(space.w_IndexError, "slice stop must be specified")
stop = space.int_w(w_slice.w_stop)
#
if not space.is_w(w_slice.w_step, space.w_None):
- raise OperationError(space.w_IndexError,
- space.wrap("slice with step not supported"))
+ raise oefmt(space.w_IndexError, "slice with step not supported")
#
if start > stop:
- raise OperationError(space.w_IndexError,
- space.wrap("slice start > stop"))
+ raise oefmt(space.w_IndexError, "slice start > stop")
#
ctype = self.ctype._check_slice_index(self, start, stop)
assert isinstance(ctype, W_CTypePointer)
diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py
--- a/pypy/module/_cffi_backend/ctypearray.py
+++ b/pypy/module/_cffi_backend/ctypearray.py
@@ -40,8 +40,8 @@
try:
datasize = ovfcheck(length * self.ctitem.size)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
else:
length = self.length
#
@@ -55,8 +55,7 @@
def _check_subscript_index(self, w_cdata, i):
space = self.space
if i < 0:
- raise OperationError(space.w_IndexError,
- space.wrap("negative index not supported"))
+ raise oefmt(space.w_IndexError, "negative index not supported")
if i >= w_cdata.get_array_length():
raise oefmt(space.w_IndexError,
"index too large for cdata '%s' (expected %d < %d)",
@@ -66,8 +65,7 @@
def _check_slice_index(self, w_cdata, start, stop):
space = self.space
if start < 0:
- raise OperationError(space.w_IndexError,
- space.wrap("negative index not supported"))
+ raise oefmt(space.w_IndexError, "negative index not supported")
if stop > w_cdata.get_array_length():
raise oefmt(space.w_IndexError,
"index too large (expected %d <= %d)",
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -471,5 +471,5 @@
# call libffi's ffi_prep_cif() function
res = jit_libffi.jit_ffi_prep_cif(rawmem)
if res != clibffi.FFI_OK:
- raise OperationError(space.w_SystemError,
- space.wrap("libffi failed to build this function type"))
+ raise oefmt(space.w_SystemError,
+ "libffi failed to build this function type")
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -185,26 +185,24 @@
except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_TypeError,
- space.wrap("field name or array index expected"))
+ raise oefmt(space.w_TypeError,
+ "field name or array index expected")
return self.typeoffsetof_index(index)
else:
return self.typeoffsetof_field(fieldname, following)
def typeoffsetof_field(self, fieldname, following):
- space = self.space
- msg = "with a field name argument, expected a struct or union ctype"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(self.space.w_TypeError,
+ "with a field name argument, expected a struct or union "
+ "ctype")
def typeoffsetof_index(self, index):
- space = self.space
- msg = "with an integer argument, expected an array or pointer ctype"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(self.space.w_TypeError,
+ "with an integer argument, expected an array or pointer "
+ "ctype")
def rawaddressof(self, cdata, offset):
- space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("expected a pointer ctype"))
+ raise oefmt(self.space.w_TypeError, "expected a pointer ctype")
def call(self, funcaddr, args_w):
space = self.space
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -292,8 +292,8 @@
try:
datasize = ovfcheck(length * itemsize)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
result = lltype.malloc(rffi.CCHARP.TO, datasize,
flavor='raw', zero=True)
try:
@@ -325,13 +325,12 @@
space = self.space
ctitem = self.ctitem
if ctitem.size < 0:
- raise OperationError(space.w_TypeError,
- space.wrap("pointer to opaque"))
+ raise oefmt(space.w_TypeError, "pointer to opaque")
try:
offset = ovfcheck(index * ctitem.size)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array offset would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array offset would overflow a ssize_t")
return ctitem, offset
def rawaddressof(self, cdata, offset):
@@ -344,9 +343,8 @@
ptr = rffi.ptradd(ptr, offset)
return cdataobj.W_CData(space, ptr, self)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("expected a cdata struct/union/array/pointer"
- " object"))
+ raise oefmt(space.w_TypeError,
+ "expected a cdata struct/union/array/pointer object")
def _fget(self, attrchar):
if attrchar == 'i': # item
@@ -382,8 +380,7 @@
if w_fileobj.cffi_fileobj is None:
fd = space.int_w(space.call_method(w_fileobj, "fileno"))
if fd < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("file has no OS file descriptor"))
+ raise oefmt(space.w_ValueError, "file has no OS file descriptor")
fd = os.dup(fd)
mode = space.str_w(space.getattr(w_fileobj, space.wrap("mode")))
try:
diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py
--- a/pypy/module/_cffi_backend/ctypestruct.py
+++ b/pypy/module/_cffi_backend/ctypestruct.py
@@ -94,8 +94,7 @@
except KeyError:
raise OperationError(space.w_KeyError, space.wrap(fieldname))
if cfield.bitshift >= 0:
- raise OperationError(space.w_TypeError,
- space.wrap("not supported for bitfields"))
+ raise oefmt(space.w_TypeError, "not supported for bitfields")
return (cfield.ctype, cfield.offset)
def _copy_from_same(self, cdata, w_ob):
@@ -243,8 +242,8 @@
varsize = ovfcheck(itemsize * varsizelength)
size = ovfcheck(self.offset + varsize)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
assert size >= 0
return max(size, optvarsize)
# if 'value' was only an integer, get_new_array_length() returns
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -44,8 +44,7 @@
raise oefmt(space.w_ValueError,
"ctype '%s' is of unknown size", w_obj.name)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("expected a 'cdata' or 'ctype' object"))
+ raise oefmt(space.w_TypeError, "expected a 'cdata' or 'ctype' object")
return space.wrap(size)
@unwrap_spec(w_ctype=ctypeobj.W_CType)
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -1,6 +1,6 @@
from __future__ import with_statement
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib import jit
from rpython.rlib.objectmodel import specialize
@@ -289,8 +289,7 @@
try:
return _standard_object_as_bool(space, w_io)
except _NotStandardObject:
- raise OperationError(space.w_TypeError,
- space.wrap("integer/float expected"))
+ raise oefmt(space.w_TypeError, "integer/float expected")
# ____________________________________________________________
@@ -305,8 +304,7 @@
else:
explicitlength = space.getindex_w(w_value, space.w_OverflowError)
if explicitlength < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("negative array length"))
+ raise oefmt(space.w_ValueError, "negative array length")
return (space.w_None, explicitlength)
# ____________________________________________________________
diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
--- a/pypy/module/_cffi_backend/newtype.py
+++ b/pypy/module/_cffi_backend/newtype.py
@@ -181,16 +181,14 @@
else:
length = space.getindex_w(w_length, space.w_OverflowError)
if length < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("negative array length"))
+ raise oefmt(space.w_ValueError, "negative array length")
return _new_array_type(space, w_ctptr, length)
@jit.elidable
def _new_array_type(space, w_ctptr, length):
_setup_wref(rweakref.has_weakref_support())
if not isinstance(w_ctptr, ctypeptr.W_CTypePointer):
- raise OperationError(space.w_TypeError,
- space.wrap("first arg must be a pointer ctype"))
+ raise oefmt(space.w_TypeError, "first arg must be a pointer ctype")
arrays = w_ctptr._array_types
if arrays is None:
arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray)
@@ -212,8 +210,8 @@
try:
arraysize = ovfcheck(length * ctitem.size)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("array size would overflow a ssize_t"))
+ raise oefmt(space.w_OverflowError,
+ "array size would overflow a ssize_t")
extra = '[%d]' % length
#
ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra)
@@ -290,9 +288,9 @@
sflags = complete_sflags(sflags)
if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion)
or w_ctype.size >= 0):
- raise OperationError(space.w_TypeError,
- space.wrap("first arg must be a non-initialized"
- " struct or union ctype"))
+ raise oefmt(space.w_TypeError,
+ "first arg must be a non-initialized struct or union "
+ "ctype")
is_union = isinstance(w_ctype, ctypestruct.W_CTypeUnion)
alignment = 1
@@ -310,8 +308,7 @@
w_field = fields_w[i]
field_w = space.fixedview(w_field)
if not (2 <= len(field_w) <= 4):
- raise OperationError(space.w_TypeError,
- space.wrap("bad field descr"))
+ raise oefmt(space.w_TypeError, "bad field descr")
fname = space.str_w(field_w[0])
ftype = space.interp_w(ctypeobj.W_CType, field_w[1])
fbitsize = -1
@@ -564,14 +561,13 @@
enumerators_w = space.fixedview(w_enumerators)
enumvalues_w = space.fixedview(w_enumvalues)
if len(enumerators_w) != len(enumvalues_w):
- raise OperationError(space.w_ValueError,
- space.wrap("tuple args must have the same size"))
+ raise oefmt(space.w_ValueError, "tuple args must have the same size")
enumerators = [space.str_w(w) for w in enumerators_w]
#
if (not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveSigned) and
not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveUnsigned)):
- raise OperationError(space.w_TypeError,
- space.wrap("expected a primitive signed or unsigned base type"))
+ raise oefmt(space.w_TypeError,
+ "expected a primitive signed or unsigned base type")
#
lvalue = lltype.malloc(rffi.CCHARP.TO, w_basectype.size, flavor='raw')
try:
@@ -601,8 +597,8 @@
fargs = []
for w_farg in space.fixedview(w_fargs):
if not isinstance(w_farg, ctypeobj.W_CType):
- raise OperationError(space.w_TypeError,
- space.wrap("first arg must be a tuple of ctype objects"))
+ raise oefmt(space.w_TypeError,
+ "first arg must be a tuple of ctype objects")
if isinstance(w_farg, ctypearray.W_CTypeArray):
w_farg = w_farg.ctptr
fargs.append(w_farg)
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -138,9 +138,7 @@
if space.is_true(space.callable(w_search_function)):
state.codec_search_path.append(w_search_function)
else:
- raise OperationError(
- space.w_TypeError,
- space.wrap("argument must be callable"))
+ raise oefmt(space.w_TypeError, "argument must be callable")
@unwrap_spec(encoding=str)
@@ -174,19 +172,17 @@
normalized_base))
state.codec_need_encodings = False
if len(state.codec_search_path) == 0:
- raise OperationError(
- space.w_LookupError,
- space.wrap("no codec search functions registered: "
- "can't find encoding"))
+ raise oefmt(space.w_LookupError,
+ "no codec search functions registered: can't find "
+ "encoding")
for w_search in state.codec_search_path:
w_result = space.call_function(w_search,
space.wrap(normalized_encoding))
if not space.is_w(w_result, space.w_None):
if not (space.isinstance_w(w_result, space.w_tuple) and
space.len_w(w_result) == 4):
- raise OperationError(
- space.w_TypeError,
- space.wrap("codec search functions must return 4-tuples"))
+ raise oefmt(space.w_TypeError,
+ "codec search functions must return 4-tuples")
else:
state.codec_search_cache[normalized_encoding] = w_result
state.modified()
@@ -204,22 +200,19 @@
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- "wrong exception"))
+ raise oefmt(space.w_TypeError, "wrong exception")
delta = space.int_w(w_end) - space.int_w(w_start)
if delta < 0 or not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- "wrong exception"))
+ raise oefmt(space.w_TypeError, "wrong exception")
def strict_errors(space, w_exc):
check_exception(space, w_exc)
if space.isinstance_w(w_exc, space.w_BaseException):
raise OperationError(space.type(w_exc), w_exc)
else:
- raise OperationError(space.w_TypeError, space.wrap(
- "codec must pass exception instance"))
+ raise oefmt(space.w_TypeError, "codec must pass exception instance")
def ignore_errors(space, w_exc):
check_exception(space, w_exc)
@@ -454,9 +447,8 @@
if space.is_true(w_decoder):
w_res = space.call_function(w_decoder, w_obj, space.wrap(errors))
if (not space.isinstance_w(w_res, space.w_tuple) or space.len_w(w_res) != 2):
- raise OperationError(
- space.w_TypeError,
- space.wrap("encoder must return a tuple (object, integer)"))
+ raise oefmt(space.w_TypeError,
+ "encoder must return a tuple (object, integer)")
return space.getitem(w_res, space.wrap(0))
else:
assert 0, "XXX, what to do here?"
@@ -475,9 +467,7 @@
if space.is_true(space.callable(w_handler)):
state.codec_error_registry[errors] = w_handler
else:
- raise OperationError(
- space.w_TypeError,
- space.wrap("handler must be callable"))
+ raise oefmt(space.w_TypeError, "handler must be callable")
# ____________________________________________________________
# delegation to runicode
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -4,7 +4,7 @@
from pypy.interpreter.typedef import TypeDef, make_weakref_descr
from pypy.interpreter.typedef import GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib.debug import check_nonneg
@@ -76,9 +76,8 @@
def checklock(self, lock):
if lock is not self.lock:
- raise OperationError(
- self.space.w_RuntimeError,
- self.space.wrap("deque mutated during iteration"))
+ raise oefmt(self.space.w_RuntimeError,
+ "deque mutated during iteration")
def init(self, w_iterable=None, w_maxlen=None):
space = self.space
@@ -200,8 +199,7 @@
def pop(self):
"Remove and return the rightmost element."
if self.len == 0:
- msg = "pop from an empty deque"
- raise OperationError(self.space.w_IndexError, self.space.wrap(msg))
+ raise oefmt(self.space.w_IndexError, "pop from an empty deque")
self.len -= 1
ri = self.rightindex
w_obj = self.rightblock.data[ri]
@@ -224,8 +222,7 @@
def popleft(self):
"Remove and return the leftmost element."
if self.len == 0:
- msg = "pop from an empty deque"
- raise OperationError(self.space.w_IndexError, self.space.wrap(msg))
+ raise oefmt(self.space.w_IndexError, "pop from an empty deque")
self.len -= 1
li = self.leftindex
w_obj = self.leftblock.data[li]
@@ -263,8 +260,7 @@
if index >= BLOCKLEN:
block = block.rightlink
index = 0
- raise OperationError(space.w_ValueError,
- space.wrap("deque.remove(x): x not in deque"))
+ raise oefmt(space.w_ValueError, "deque.remove(x): x not in deque")
def reverse(self):
"Reverse *IN PLACE*."
@@ -371,8 +367,7 @@
b, i = self.locate(start)
return b.data[i]
else:
- raise OperationError(space.w_TypeError,
- space.wrap("deque[:] is not supported"))
+ raise oefmt(space.w_TypeError, "deque[:] is not supported")
def setitem(self, w_index, w_newobj):
space = self.space
@@ -381,8 +376,7 @@
b, i = self.locate(start)
b.data[i] = w_newobj
else:
- raise OperationError(space.w_TypeError,
- space.wrap("deque[:] is not supported"))
+ raise oefmt(space.w_TypeError, "deque[:] is not supported")
def delitem(self, w_index):
space = self.space
@@ -390,8 +384,7 @@
if step == 0: # index only
self.del_item(start)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("deque[:] is not supported"))
+ raise oefmt(space.w_TypeError, "deque[:] is not supported")
def copy(self):
"Return a shallow copy of a deque."
@@ -520,13 +513,12 @@
return self.space.wrap(self.counter)
def next(self):
+ space = self.space
if self.lock is not self.deque.lock:
self.counter = 0
- raise OperationError(
- self.space.w_RuntimeError,
- self.space.wrap("deque mutated during iteration"))
+ raise oefmt(space.w_RuntimeError, "deque mutated during iteration")
if self.counter == 0:
- raise OperationError(self.space.w_StopIteration, self.space.w_None)
+ raise OperationError(space.w_StopIteration, space.w_None)
self.counter -= 1
ri = self.index
w_x = self.block.data[ri]
@@ -563,13 +555,12 @@
return self.space.wrap(self.counter)
def next(self):
+ space = self.space
if self.lock is not self.deque.lock:
self.counter = 0
- raise OperationError(
- self.space.w_RuntimeError,
- self.space.wrap("deque mutated during iteration"))
+ raise oefmt(space.w_RuntimeError, "deque mutated during iteration")
if self.counter == 0:
- raise OperationError(self.space.w_StopIteration, self.space.w_None)
+ raise OperationError(space.w_StopIteration, space.w_None)
self.counter -= 1
ri = self.index
w_x = self.block.data[ri]
diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py
--- a/pypy/module/_csv/interp_csv.py
+++ b/pypy/module/_csv/interp_csv.py
@@ -106,18 +106,17 @@
# validate options
if not (0 <= tmp_quoting < 4):
- raise OperationError(space.w_TypeError,
- space.wrap('bad "quoting" value'))
+ raise oefmt(space.w_TypeError, 'bad "quoting" value')
if dialect.delimiter == u'\0':
- raise OperationError(space.w_TypeError,
- space.wrap('"delimiter" must be a 1-character string'))
+ raise oefmt(space.w_TypeError,
+ '"delimiter" must be a 1-character string')
if space.is_w(w_quotechar, space.w_None) and w_quoting is None:
tmp_quoting = QUOTE_NONE
if tmp_quoting != QUOTE_NONE and dialect.quotechar == u'\0':
- raise OperationError(space.w_TypeError,
- space.wrap('quotechar must be set if quoting enabled'))
+ raise oefmt(space.w_TypeError,
+ "quotechar must be set if quoting enabled")
dialect.quoting = tmp_quoting
return dialect
diff --git a/pypy/module/_demo/demo.py b/pypy/module/_demo/demo.py
--- a/pypy/module/_demo/demo.py
+++ b/pypy/module/_demo/demo.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty
@@ -22,8 +22,7 @@
def measuretime(space, repetitions, w_callable):
if repetitions <= 0:
w_DemoError = get(space, 'DemoError')
- msg = "repetition count must be > 0"
- raise OperationError(w_DemoError, space.wrap(msg))
+ raise oefmt(w_DemoError, "repetition count must be > 0")
starttime = time(0)
for i in range(repetitions):
space.call_function(w_callable)
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -7,7 +7,7 @@
from rpython.tool.sourcetools import func_renamer
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, interp2app
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.module.thread.os_lock import Lock
@@ -85,8 +85,7 @@
def digest_type_by_name(self, space):
digest_type = ropenssl.EVP_get_digestbyname(self.name)
if not digest_type:
- raise OperationError(space.w_ValueError,
- space.wrap("unknown hash function"))
+ raise oefmt(space.w_ValueError, "unknown hash function")
return digest_type
def descr_repr(self, space):
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -40,8 +40,7 @@
## self.lock.free()
self.lock = space.allocate_lock()
self.owner = 0
- self.operr = OperationError(space.w_RuntimeError,
- space.wrap("reentrant call"))
+ self.operr = oefmt(space.w_RuntimeError, "reentrant call")
def __enter__(self):
if not self.lock.acquire(False):
@@ -80,8 +79,7 @@
w_data = space.call_method(self, "read", space.wrap(length))
if not space.isinstance_w(w_data, space.w_str):
- raise OperationError(space.w_TypeError, space.wrap(
- "read() should return bytes"))
+ raise oefmt(space.w_TypeError, "read() should return bytes")
data = space.bytes_w(w_data)
if len(data) > length:
raise oefmt(space.w_ValueError,
@@ -151,8 +149,8 @@
def _init(self, space):
if self.buffer_size <= 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "buffer size must be strictly positive"))
+ raise oefmt(space.w_ValueError,
+ "buffer size must be strictly positive")
self.buffer = ['\0'] * self.buffer_size
@@ -165,11 +163,10 @@
def _check_init(self, space):
if self.state == STATE_ZERO:
- raise OperationError(space.w_ValueError, space.wrap(
- "I/O operation on uninitialized object"))
+ raise oefmt(space.w_ValueError,
+ "I/O operation on uninitialized object")
elif self.state == STATE_DETACHED:
- raise OperationError(space.w_ValueError, space.wrap(
- "raw stream has been detached"))
+ raise oefmt(space.w_ValueError, "raw stream has been detached")
def _check_closed(self, space, message=None):
self._check_init(space)
@@ -179,8 +176,8 @@
w_pos = space.call_method(self.w_raw, "tell")
pos = space.r_longlong_w(w_pos)
if pos < 0:
- raise OperationError(space.w_IOError, space.wrap(
- "raw stream returned invalid position"))
+ raise oefmt(space.w_IOError,
+ "raw stream returned invalid position")
self.abs_pos = pos
return pos
@@ -292,8 +289,8 @@
space.wrap(pos), space.wrap(whence))
pos = space.r_longlong_w(w_pos)
if pos < 0:
- raise OperationError(space.w_IOError, space.wrap(
- "Raw stream returned invalid position"))
+ raise oefmt(space.w_IOError,
+ "Raw stream returned invalid position")
self.abs_pos = pos
return pos
@@ -372,8 +369,7 @@
written = space.getindex_w(w_written, space.w_IOError)
if not 0 <= written <= len(data):
- raise OperationError(space.w_IOError, space.wrap(
- "raw write() returned invalid length"))
+ raise oefmt(space.w_IOError, "raw write() returned invalid length")
if self.abs_pos != -1:
self.abs_pos += written
return written
@@ -426,8 +422,8 @@
with self.lock:
res = self._read_generic(space, size)
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "read length must be positive or -1"))
+ raise oefmt(space.w_ValueError,
+ "read length must be positive or -1")
return space.wrapbytes(res)
@unwrap_spec(size=int)
@@ -463,8 +459,7 @@
self._check_closed(space, "read of closed file")
if size < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "read length must be positive"))
+ raise oefmt(space.w_ValueError, "read length must be positive")
if size == 0:
return space.wrapbytes("")
@@ -546,9 +541,9 @@
raise BlockingIOError()
size = space.int_w(w_size)
if size < 0 or size > length:
- raise OperationError(space.w_IOError, space.wrap(
- "raw readinto() returned invalid length %d "
- "(should have been between 0 and %d)" % (size, length)))
+ raise oefmt(space.w_IOError,
+ "raw readinto() returned invalid length %d (should "
+ "have been between 0 and %d)", size, length)
if self.abs_pos != -1:
self.abs_pos += size
return size
diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py
--- a/pypy/module/_io/interp_bytesio.py
+++ b/pypy/module/_io/interp_bytesio.py
@@ -114,8 +114,7 @@
size = space.r_longlong_w(w_size)
if size < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative size value"))
+ raise oefmt(space.w_ValueError, "negative size value")
self.truncate(size)
if size == pos:
@@ -141,16 +140,13 @@
if whence == 0:
if pos < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative seek value"))
+ raise oefmt(space.w_ValueError, "negative seek value")
elif whence == 1:
if pos > sys.maxint - self.tell():
- raise OperationError(space.w_OverflowError, space.wrap(
- "new position too large"))
+ raise oefmt(space.w_OverflowError, "new position too large")
elif whence == 2:
if pos > sys.maxint - self.getsize():
- raise OperationError(space.w_OverflowError, space.wrap(
- "new position too large"))
+ raise oefmt(space.w_OverflowError, "new position too large")
else:
raise oefmt(space.w_ValueError,
"whence must be between 0 and 2, not %d", whence)
@@ -195,8 +191,8 @@
self.write_w(space, w_content)
pos = space.int_w(w_pos)
if pos < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "position value cannot be negative"))
+ raise oefmt(space.w_ValueError,
+ "position value cannot be negative")
self.seek(pos)
if not space.is_w(w_dict, space.w_None):
space.call_method(self.getdict(space), "update", w_dict)
diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py
--- a/pypy/module/_io/interp_fileio.py
+++ b/pypy/module/_io/interp_fileio.py
@@ -1,7 +1,7 @@
from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec
-from pypy.interpreter.error import OperationError, oefmt
-from pypy.interpreter.error import wrap_oserror, wrap_oserror2
+from pypy.interpreter.error import (
+ OperationError, oefmt, wrap_oserror, wrap_oserror2)
from rpython.rlib.rarithmetic import r_longlong
from rpython.rlib.rstring import StringBuilder
from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC, O_EXCL
@@ -13,8 +13,7 @@
def fget(space, obj):
w_value = getattr(obj, name)
if w_value is None:
- raise OperationError(space.w_AttributeError,
- space.wrap(name))
+ raise OperationError(space.w_AttributeError, space.wrap(name))
else:
return w_value
def fset(space, obj, w_value):
@@ -22,8 +21,7 @@
def fdel(space, obj):
w_value = getattr(obj, name)
if w_value is None:
- raise OperationError(space.w_AttributeError,
- space.wrap(name))
+ raise OperationError(space.w_AttributeError, space.wrap(name))
setattr(obj, name, None)
return GetSetProperty(fget, fset, fdel, cls=cls, doc=doc)
@@ -33,8 +31,8 @@
O_APPEND = getattr(os, "O_APPEND", 0)
def _bad_mode(space):
- raise OperationError(space.w_ValueError, space.wrap(
- "Must have exactly one of read/write/create/append mode"))
+ raise oefmt(space.w_ValueError,
+ "Must have exactly one of read/write/create/append mode")
def decode_mode(space, mode):
flags = 0
@@ -79,8 +77,7 @@
readable = writable = True
plus = True
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid mode: %s" % (mode,)))
+ raise oefmt(space.w_ValueError, "invalid mode: %s", mode)
if not rwa:
_bad_mode(space)
@@ -143,8 +140,8 @@
@unwrap_spec(mode=str, closefd=int)
def descr_init(self, space, w_name, mode='r', closefd=True, w_opener=None):
if space.isinstance_w(w_name, space.w_float):
- raise OperationError(space.w_TypeError, space.wrap(
- "integer argument expected, got float"))
+ raise oefmt(space.w_TypeError,
+ "integer argument expected, got float")
fd = -1
try:
@@ -153,8 +150,7 @@
pass
else:
if fd < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative file descriptor"))
+ raise oefmt(space.w_ValueError, "negative file descriptor")
self.readable, self.writable, self.created, self.appending, flags = decode_mode(space, mode)
@@ -172,8 +168,8 @@
elif space.is_none(w_opener):
self.closefd = True
if not closefd:
- raise OperationError(space.w_ValueError, space.wrap(
- "Cannot use closefd=False with file name"))
+ raise oefmt(space.w_ValueError,
+ "Cannot use closefd=False with file name")
from pypy.module.posix.interp_posix import (
dispatch_filename, rposix)
diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py
--- a/pypy/module/_io/interp_io.py
+++ b/pypy/module/_io/interp_io.py
@@ -73,25 +73,19 @@
rawmode += "+"
if universal and (writing or appending):
- raise OperationError(space.w_ValueError,
- space.wrap("can't use U and writing mode at once")
- )
+ raise oefmt(space.w_ValueError, "can't use U and writing mode at once")
if text and binary:
- raise OperationError(space.w_ValueError,
- space.wrap("can't have text and binary mode at once")
- )
+ raise oefmt(space.w_ValueError,
+ "can't have text and binary mode at once")
if reading + writing + creating + appending > 1:
- raise OperationError(space.w_ValueError,
- space.wrap("must have exactly one of read/write/create/append mode")
- )
+ raise oefmt(space.w_ValueError,
+ "must have exactly one of read/write/create/append mode")
if binary and encoding is not None:
- raise OperationError(space.w_ValueError,
- space.wrap("binary mode doesn't take an encoding argument")
- )
+ raise oefmt(space.w_ValueError,
+ "binary mode doesn't take an encoding argument")
if binary and newline is not None:
- raise OperationError(space.w_ValueError,
- space.wrap("binary mode doesn't take a newline argument")
- )
+ raise oefmt(space.w_ValueError,
+ "binary mode doesn't take a newline argument")
w_raw = space.call_function(
space.gettypefor(W_FileIO), w_file, space.wrap(rawmode),
space.wrap(closefd), w_opener)
@@ -116,15 +110,11 @@
buffering = st.st_blksize
if buffering < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid buffering size")
- )
+ raise oefmt(space.w_ValueError, "invalid buffering size")
if buffering == 0:
if not binary:
- raise OperationError(space.w_ValueError,
- space.wrap("can't have unbuffered text I/O")
- )
+ raise oefmt(space.w_ValueError, "can't have unbuffered text I/O")
return w_raw
if updating:
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -137,9 +137,7 @@
def flush_w(self, space):
if self._CLOSED():
- raise OperationError(
- space.w_ValueError,
- space.wrap("I/O operation on closed file"))
+ raise oefmt(space.w_ValueError, "I/O operation on closed file")
def seek_w(self, space, w_offset, w_whence=None):
self._unsupportedoperation(space, "seek")
@@ -361,8 +359,7 @@
break
if not space.isinstance_w(w_data, space.w_bytes):
- raise OperationError(space.w_TypeError, space.wrap(
- "read() should return bytes"))
+ raise oefmt(space.w_TypeError, "read() should return bytes")
data = space.bytes_w(w_data)
if not data:
break
diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py
--- a/pypy/module/_io/interp_stringio.py
+++ b/pypy/module/_io/interp_stringio.py
@@ -89,9 +89,8 @@
self.buf = list(initval)
pos = space.getindex_w(w_pos, space.w_TypeError)
if pos < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("position value cannot be negative")
- )
+ raise oefmt(space.w_ValueError,
+ "position value cannot be negative")
self.pos = pos
if not space.is_w(w_dict, space.w_None):
if not space.isinstance_w(w_dict, space.w_dict):
@@ -203,9 +202,7 @@
elif mode == 0 and pos < 0:
raise oefmt(space.w_ValueError, "negative seek position: %d", pos)
elif mode != 0 and pos != 0:
- raise OperationError(space.w_IOError,
- space.wrap("Can't do nonzero cur-relative seeks")
- )
+ raise oefmt(space.w_IOError, "Can't do nonzero cur-relative seeks")
# XXX: this makes almost no sense, but its how CPython does it.
if mode == 1:
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -59,8 +59,8 @@
@unwrap_spec(final=int)
def decode_w(self, space, w_input, final=False):
if self.w_decoder is None:
- raise OperationError(space.w_ValueError, space.wrap(
- "IncrementalNewlineDecoder.__init__ not called"))
+ raise oefmt(space.w_ValueError,
+ "IncrementalNewlineDecoder.__init__ not called")
# decode input (with the eventual \r from a previous pass)
if not space.is_w(self.w_decoder, space.w_None):
@@ -70,8 +70,8 @@
w_output = w_input
if not space.isinstance_w(w_output, space.w_unicode):
- raise OperationError(space.w_TypeError, space.wrap(
- "decoder should return a string result"))
+ raise oefmt(space.w_TypeError,
+ "decoder should return a string result")
output = space.unicode_w(w_output)
output_len = len(output)
@@ -302,8 +302,7 @@
if space.isinstance_w(w_encoding, space.w_unicode):
return w_encoding
- raise OperationError(space.w_IOError, space.wrap(
- "could not determine default encoding"))
+ raise oefmt(space.w_IOError, "could not determine default encoding")
class PositionCookie(object):
def __init__(self, bigint):
@@ -393,8 +392,8 @@
newline = space.unicode_w(w_newline)
if newline and newline not in (u'\n', u'\r\n', u'\r'):
r = space.str_w(space.repr(w_newline))
- raise OperationError(space.w_ValueError, space.wrap(
- "illegal newline value: %s" % (r,)))
+ raise oefmt(space.w_ValueError,
+ "illegal newline value: %s", r)
self.line_buffering = line_buffering
self.write_through = write_through
@@ -452,13 +451,13 @@
def _check_init(self, space):
if self.state == STATE_ZERO:
- raise OperationError(space.w_ValueError, space.wrap(
- "I/O operation on uninitialized object"))
+ raise oefmt(space.w_ValueError,
+ "I/O operation on uninitialized object")
def _check_attached(self, space):
if self.state == STATE_DETACHED:
- raise OperationError(space.w_ValueError, space.wrap(
- "underlying buffer has been detached"))
+ raise oefmt(space.w_ValueError,
+ "underlying buffer has been detached")
self._check_init(space)
def _check_closed(self, space, message=None):
@@ -774,8 +773,8 @@
self._unsupportedoperation(space, "not writable")
if not space.isinstance_w(w_text, space.w_unicode):
- msg = "unicode argument expected, got '%T'"
- raise oefmt(space.w_TypeError, msg, w_text)
+ raise oefmt(space.w_TypeError,
+ "unicode argument expected, got '%T'", w_text)
text = space.unicode_w(w_text)
textlen = len(text)
@@ -904,13 +903,14 @@
w_pos, space.wrap(whence))
elif whence != 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid whence (%d, should be 0, 1 or 2)" % (whence,)))
+ raise oefmt(space.w_ValueError,
+ "invalid whence (%d, should be 0, 1 or 2)",
+ whence)
if space.is_true(space.lt(w_pos, space.wrap(0))):
r = space.str_w(space.repr(w_pos))
- raise OperationError(space.w_ValueError, space.wrap(
- "negative seek position %s" % (r,)))
+ raise oefmt(space.w_ValueError,
+ "negative seek position %s", r)
space.call_method(self, "flush")
@@ -947,8 +947,8 @@
# Skip chars_to_skip of the decoded characters
if len(self.decoded_chars) < cookie.chars_to_skip:
- raise OperationError(space.w_IOError, space.wrap(
- "can't restore logical file position"))
+ raise oefmt(space.w_IOError,
+ "can't restore logical file position")
self.decoded_chars_used = cookie.chars_to_skip
else:
self.snapshot = PositionSnapshot(cookie.dec_flags, "")
@@ -967,8 +967,8 @@
"underlying stream is not seekable")
if not self.telling:
- raise OperationError(space.w_IOError, space.wrap(
- "telling position disabled by next() call"))
+ raise oefmt(space.w_IOError,
+ "telling position disabled by next() call")
self._writeflush(space)
space.call_method(self, "flush")
@@ -1041,8 +1041,8 @@
cookie.need_eof = 1
if chars_decoded < chars_to_skip:
- raise OperationError(space.w_IOError, space.wrap(
- "can't reconstruct logical file position"))
+ raise oefmt(space.w_IOError,
+ "can't reconstruct logical file position")
finally:
space.call_method(self.w_decoder, "setstate", w_saved_state)
@@ -1058,9 +1058,8 @@
self._check_attached(space)
size = space.int_w(w_size)
if size <= 0:
- raise OperationError(space.w_ValueError,
- space.wrap("a strictly positive integer is required")
- )
+ raise oefmt(space.w_ValueError,
+ "a strictly positive integer is required")
self.chunk_size = size
W_TextIOWrapper.typedef = TypeDef(
diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py
--- a/pypy/module/_locale/interp_locale.py
+++ b/pypy/module/_locale/interp_locale.py
@@ -1,7 +1,7 @@
from rpython.rlib import rposix
from rpython.rlib.rarithmetic import intmask
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from rpython.rlib import rlocale
@@ -149,8 +149,7 @@
try:
return space.wrap(rlocale.nl_langinfo(key))
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("unsupported langinfo constant"))
+ raise oefmt(space.w_ValueError, "unsupported langinfo constant")
#___________________________________________________________________
# HAVE_LIBINTL dependence
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py
--- a/pypy/module/_lsprof/interp_lsprof.py
+++ b/pypy/module/_lsprof/interp_lsprof.py
@@ -1,7 +1,7 @@
import py
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import BuiltinFunction, Method, Function
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import (TypeDef, GetSetProperty,
@@ -420,9 +420,9 @@
def getstats(self, space):
if self.w_callable is None:
if self.is_enabled:
- raise OperationError(space.w_RuntimeError,
- space.wrap("Profiler instance must be disabled "
- "before getting the stats"))
+ raise oefmt(space.w_RuntimeError,
+ "Profiler instance must be disabled before "
+ "getting the stats")
if self.total_timestamp:
factor = self.total_real_time / float(self.total_timestamp)
else:
diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py
--- a/pypy/module/_multibytecodec/interp_multibytecodec.py
+++ b/pypy/module/_multibytecodec/interp_multibytecodec.py
@@ -1,7 +1,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.module._multibytecodec import c_codecs
from pypy.module._codecs.interp_codecs import CodecState
@@ -57,8 +57,7 @@
try:
codec = c_codecs.getcodec(name)
except KeyError:
- raise OperationError(space.w_LookupError,
- space.wrap("no such codec is supported."))
+ raise oefmt(space.w_LookupError, "no such codec is supported.")
return space.wrap(MultibyteCodec(name, codec))
@@ -83,5 +82,4 @@
space.wrap(e.reason)]))
def wrap_runtimeerror(space):
- raise OperationError(space.w_RuntimeError,
- space.wrap("internal codec error"))
+ raise oefmt(space.w_RuntimeError, "internal codec error")
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -86,12 +86,10 @@
def _check_readable(self, space):
if not self.flags & READABLE:
- raise OperationError(space.w_IOError,
- space.wrap("connection is write-only"))
+ raise oefmt(space.w_IOError, "connection is write-only")
def _check_writable(self, space):
if not self.flags & WRITABLE:
- raise OperationError(space.w_IOError,
- space.wrap("connection is read-only"))
+ raise oefmt(space.w_IOError, "connection is read-only")
@unwrap_spec(offset='index', size='index')
def send_bytes(self, space, w_buf, offset=0, size=PY_SSIZE_T_MIN):
@@ -99,20 +97,16 @@
length = len(buf)
self._check_writable(space)
if offset < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("offset is negative"))
+ raise oefmt(space.w_ValueError, "offset is negative")
if length < offset:
- raise OperationError(space.w_ValueError,
- space.wrap("buffer length < offset"))
+ raise oefmt(space.w_ValueError, "buffer length < offset")
if size == PY_SSIZE_T_MIN:
size = length - offset
elif size < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("size is negative"))
+ raise oefmt(space.w_ValueError, "size is negative")
elif offset + size > length:
- raise OperationError(space.w_ValueError,
- space.wrap("buffer length > offset + size"))
+ raise oefmt(space.w_ValueError, "buffer length > offset + size")
self.do_send_string(space, buf, offset, size)
@@ -120,8 +114,7 @@
def recv_bytes(self, space, maxlength=PY_SSIZE_T_MAX):
self._check_readable(space)
if maxlength < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("maxlength < 0"))
+ raise oefmt(space.w_ValueError, "maxlength < 0")
res, newbuf = self.do_recv_string(
space, self.BUFFER_SIZE, maxlength)
@@ -249,8 +242,7 @@
def __init__(self, space, fd, flags):
if fd == self.INVALID_HANDLE_VALUE or fd < 0:
- raise OperationError(space.w_IOError,
- space.wrap("invalid handle %d" % fd))
+ raise oefmt(space.w_IOError, "invalid handle %d", fd)
W_BaseConnection.__init__(self, flags)
self.fd = fd
@@ -301,8 +293,7 @@
self.flags &= ~READABLE
if self.flags == 0:
self.close()
- raise OperationError(space.w_IOError, space.wrap(
- "bad message length"))
+ raise oefmt(space.w_IOError, "bad message length")
if length <= buflength:
self._recvall(space, self.buffer, length)
@@ -342,8 +333,8 @@
if remaining == length:
raise OperationError(space.w_EOFError, space.w_None)
else:
- raise OperationError(space.w_IOError, space.wrap(
- "got end of file during message"))
+ raise oefmt(space.w_IOError,
+ "got end of file during message")
# XXX inefficient
for i in range(count):
buf[i] = data[i]
@@ -459,8 +450,7 @@
self.flags &= ~READABLE
if self.flags == 0:
self.close()
- raise OperationError(space.w_IOError, space.wrap(
- "bad message length"))
+ raise oefmt(space.w_IOError, "bad message length")
newbuf = lltype.malloc(rffi.CCHARP.TO, length + 1, flavor='raw')
for i in range(read_ptr[0]):
diff --git a/pypy/module/_multiprocessing/interp_memory.py b/pypy/module/_multiprocessing/interp_memory.py
--- a/pypy/module/_multiprocessing/interp_memory.py
+++ b/pypy/module/_multiprocessing/interp_memory.py
@@ -1,6 +1,6 @@
from rpython.rtyper.lltypesystem import rffi
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.mmap.interp_mmap import W_MMap
def address_of_buffer(space, w_obj):
@@ -10,5 +10,4 @@
return space.newtuple([space.wrap(address),
space.wrap(mmap.mmap.size)])
else:
- raise OperationError(space.w_TypeError, space.wrap(
- "cannot get address of buffer"))
+ raise oefmt(space.w_TypeError, "cannot get address of buffer")
diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py
--- a/pypy/module/_multiprocessing/interp_semaphore.py
+++ b/pypy/module/_multiprocessing/interp_semaphore.py
@@ -10,7 +10,7 @@
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError, wrap_oserror
+from pypy.interpreter.error import oefmt, wrap_oserror
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import GetSetProperty, TypeDef
from pypy.module._multiprocessing.interp_connection import w_handle
@@ -250,8 +250,7 @@
if timeout < 0.0:
timeout = 0.0
elif timeout >= 0.5 * rwin32.INFINITE: # 25 days
- raise OperationError(space.w_OverflowError,
- space.wrap("timeout is too large"))
+ raise oefmt(space.w_OverflowError, "timeout is too large")
full_msecs = r_uint(int(timeout + 0.5))
# check whether we can acquire without blocking
@@ -298,9 +297,8 @@
lltype.nullptr(rffi.LONGP.TO)):
err = rwin32.GetLastError_saved()
if err == 0x0000012a: # ERROR_TOO_MANY_POSTS
- raise OperationError(
- space.w_ValueError,
- space.wrap("semaphore or lock released too many times"))
+ raise oefmt(space.w_ValueError,
+ "semaphore or lock released too many times")
else:
raise WindowsError(err, "ReleaseSemaphore")
@@ -393,23 +391,21 @@
else:
# it was not locked so undo wait and raise
sem_post(self.handle)
- raise OperationError(
- space.w_ValueError, space.wrap(
- "semaphore or lock released too many times"))
+ raise oefmt(space.w_ValueError,
+ "semaphore or lock released too many times")
else:
# This check is not an absolute guarantee that the semaphore does
# not rise above maxvalue.
if sem_getvalue(self.handle) >= self.maxvalue:
- raise OperationError(
- space.w_ValueError, space.wrap(
- "semaphore or lock released too many times"))
+ raise oefmt(space.w_ValueError,
+ "semaphore or lock released too many times")
sem_post(self.handle)
def semlock_getvalue(self, space):
if HAVE_BROKEN_SEM_GETVALUE:
- raise OperationError(space.w_NotImplementedError, space.wrap(
- 'sem_getvalue is not implemented on this system'))
+ raise oefmt(space.w_NotImplementedError,
+ "sem_getvalue is not implemented on this system")
else:
val = sem_getvalue(self.handle)
# some posix implementations use negative numbers to indicate
@@ -492,10 +488,9 @@
def release(self, space):
if self.kind == RECURSIVE_MUTEX:
if not self._ismine():
- raise OperationError(
- space.w_AssertionError,
- space.wrap("attempt to release recursive lock"
- " not owned by thread"))
+ raise oefmt(space.w_AssertionError,
+ "attempt to release recursive lock not owned by "
+ "thread")
if self.count > 1:
self.count -= 1
return
@@ -528,8 +523,7 @@
@unwrap_spec(kind=int, value=int, maxvalue=int)
def descr_new(space, w_subtype, kind, value, maxvalue):
if kind != RECURSIVE_MUTEX and kind != SEMAPHORE:
- raise OperationError(space.w_ValueError,
- space.wrap("unrecognized kind"))
+ raise oefmt(space.w_ValueError, "unrecognized kind")
counter = space.fromcache(CounterState).getCount()
name = "/mp%d-%d" % (os.getpid(), counter)
diff --git a/pypy/module/_multiprocessing/interp_win32.py b/pypy/module/_multiprocessing/interp_win32.py
--- a/pypy/module/_multiprocessing/interp_win32.py
+++ b/pypy/module/_multiprocessing/interp_win32.py
@@ -4,7 +4,7 @@
from rpython.rtyper.tool import rffi_platform
from rpython.translator.tool.cbuild import ExternalCompilationInfo
-from pypy.interpreter.error import OperationError, wrap_windowserror
+from pypy.interpreter.error import oefmt, wrap_windowserror
from pypy.interpreter.function import StaticMethod
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.module._multiprocessing.interp_connection import w_handle
@@ -120,8 +120,7 @@
outputsize, inputsize, timeout, w_security):
security = space.int_w(w_security)
if security:
- raise OperationError(space.w_NotImplementedError,
- space.wrap("expected a NULL pointer"))
+ raise oefmt(space.w_NotImplementedError, "expected a NULL pointer")
handle = _CreateNamedPipe(
name, openmode, pipemode, maxinstances,
outputsize, inputsize, timeout, rffi.NULL)
@@ -135,8 +134,7 @@
handle = handle_w(space, w_handle)
overlapped = space.int_w(w_overlapped)
if overlapped:
- raise OperationError(space.w_NotImplementedError,
- space.wrap("expected a NULL pointer"))
+ raise oefmt(space.w_NotImplementedError, "expected a NULL pointer")
if not _ConnectNamedPipe(handle, rffi.NULL):
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
@@ -176,8 +174,7 @@
security = space.int_w(w_security)
templatefile = space.int_w(w_templatefile)
if security or templatefile:
- raise OperationError(space.w_NotImplementedError,
- space.wrap("expected a NULL pointer"))
+ raise oefmt(space.w_NotImplementedError, "expected a NULL pointer")
handle = _CreateFile(filename, access, share, rffi.NULL,
disposition, flags, rwin32.NULL_HANDLE)
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py
--- a/pypy/module/_pickle_support/maker.py
+++ b/pypy/module/_pickle_support/maker.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.nestedscope import Cell
from pypy.interpreter.pycode import PyCode
from pypy.interpreter.function import Function, Method
@@ -83,9 +83,8 @@
try:
return gateway.BuiltinCode.find(identifier)
except KeyError:
- raise OperationError(space.w_RuntimeError,
- space.wrap("cannot unpickle builtin code: "+
- identifier))
+ raise oefmt(space.w_RuntimeError,
+ "cannot unpickle builtin code: %s", identifier)
@unwrap_spec(identifier=str)
def builtin_function(space, identifier):
@@ -93,9 +92,8 @@
try:
return function.Function.find(identifier)
except KeyError:
- raise OperationError(space.w_RuntimeError,
- space.wrap("cannot unpickle builtin function: "+
- identifier))
+ raise oefmt(space.w_RuntimeError,
+ "cannot unpickle builtin function: %s", identifier)
# ___________________________________________________________________
diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py
--- a/pypy/module/_pypyjson/interp_decoder.py
+++ b/pypy/module/_pypyjson/interp_decoder.py
@@ -3,7 +3,7 @@
from rpython.rlib.objectmodel import specialize
from rpython.rlib import rfloat, runicode
from rpython.rtyper.lltypesystem import lltype, rffi
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from pypy.interpreter import unicodehelper
OVF_DIGITS = len(str(sys.maxint))
diff --git a/pypy/module/_random/interp_random.py b/pypy/module/_random/interp_random.py
--- a/pypy/module/_random/interp_random.py
+++ b/pypy/module/_random/interp_random.py
@@ -1,6 +1,6 @@
import time
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.baseobjspace import W_Root
@@ -58,11 +58,9 @@
def setstate(self, space, w_state):
if not space.isinstance_w(w_state, space.w_tuple):
- errstring = space.wrap("state vector must be tuple")
- raise OperationError(space.w_TypeError, errstring)
+ raise oefmt(space.w_TypeError, "state vector must be tuple")
if space.len_w(w_state) != rrandom.N + 1:
- errstring = space.wrap("state vector is the wrong size")
- raise OperationError(space.w_ValueError, errstring)
+ raise oefmt(space.w_ValueError, "state vector is the wrong size")
w_zero = space.newint(0)
# independent of platfrom, since the below condition is only
# true on 32 bit platforms anyway
@@ -78,8 +76,8 @@
@unwrap_spec(k=int)
def getrandbits(self, space, k):
if k <= 0:
- strerror = space.wrap("number of bits must be greater than zero")
- raise OperationError(space.w_ValueError, strerror)
+ raise oefmt(space.w_ValueError,
+ "number of bits must be greater than zero")
bytes = ((k - 1) // 32 + 1) * 4
bytesarray = rstring.StringBuilder(bytes)
for i in range(0, bytes, 4):
diff --git a/pypy/module/_rawffi/alt/interp_ffitype.py b/pypy/module/_rawffi/alt/interp_ffitype.py
--- a/pypy/module/_rawffi/alt/interp_ffitype.py
+++ b/pypy/module/_rawffi/alt/interp_ffitype.py
@@ -4,7 +4,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
from pypy.interpreter.gateway import interp2app
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
class W_FFIType(W_Root):
@@ -39,8 +39,8 @@
try:
return space.wrap(self.sizeof())
except ValueError:
- msg = "Operation not permitted on an incomplete type"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "Operation not permitted on an incomplete type")
def sizeof(self):
return intmask(self.get_ffitype().c_size)
diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py
--- a/pypy/module/_rawffi/alt/interp_funcptr.py
+++ b/pypy/module/_rawffi/alt/interp_funcptr.py
@@ -49,8 +49,8 @@
return W_FuncPtr(func, argtypes_w, w_restype)
else:
- raise OperationError(space.w_TypeError, space.wrap(
- 'function name must be a string or integer'))
+ raise oefmt(space.w_TypeError,
+ "function name must be a string or integer")
else:
@unwrap_spec(name=str)
def _getfunc(space, CDLL, w_name, w_argtypes, w_restype):
@@ -71,8 +71,7 @@
def unwrap_ffitype(space, w_argtype, allow_void=False):
res = w_argtype.get_ffitype()
if res is libffi.types.void and not allow_void:
- msg = 'void is not a valid argument type'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "void is not a valid argument type")
return res
diff --git a/pypy/module/_rawffi/alt/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py
--- a/pypy/module/_rawffi/alt/type_converter.py
+++ b/pypy/module/_rawffi/alt/type_converter.py
@@ -1,7 +1,7 @@
from rpython.rlib import libffi
from rpython.rlib import jit
from rpython.rlib.rarithmetic import r_uint
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from pypy.module._rawffi.structure import W_StructureInstance, W_Structure
from pypy.module._rawffi.alt.interp_ffitype import app_types
@@ -240,8 +240,7 @@
elif isinstance(w_structdescr, W_Structure):
return self.get_struct_rawffi(w_ffitype, w_structdescr)
else:
- raise OperationError(self.space.w_TypeError,
- self.space.wrap("Unsupported struct shape"))
+ raise oefmt(self.space.w_TypeError, "Unsupported struct shape")
elif w_ffitype.is_void():
voidval = self.get_void(w_ffitype)
assert voidval is None
diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py
--- a/pypy/module/_rawffi/array.py
+++ b/pypy/module/_rawffi/array.py
@@ -6,7 +6,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from rpython.rtyper.lltypesystem import lltype, rffi
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.module._rawffi.interp_rawffi import segfault_exception
from pypy.module._rawffi.interp_rawffi import W_DataShape, W_DataInstance
from pypy.module._rawffi.interp_rawffi import unwrap_value, wrap_value
@@ -43,9 +43,8 @@
items_w = space.unpackiterable(w_items)
iterlength = len(items_w)
if iterlength > length:
- raise OperationError(space.w_ValueError,
- space.wrap("too many items for specified"
- " array length"))
+ raise oefmt(space.w_ValueError,
+ "too many items for specified array length")
for num in range(iterlength):
w_item = items_w[num]
unwrap_value(space, write_ptr, result.ll_buffer, num,
@@ -152,12 +151,10 @@
def decodeslice(self, space, w_slice):
if not space.isinstance_w(w_slice, space.w_slice):
- raise OperationError(space.w_TypeError,
- space.wrap('index must be int or slice'))
+ raise oefmt(space.w_TypeError, "index must be int or slice")
letter = self.shape.itemcode
if letter != 'c':
- raise OperationError(space.w_TypeError,
- space.wrap("only 'c' arrays support slicing"))
+ raise oefmt(space.w_TypeError, "only 'c' arrays support slicing")
w_start = space.getattr(w_slice, space.wrap('start'))
w_stop = space.getattr(w_slice, space.wrap('stop'))
w_step = space.getattr(w_slice, space.wrap('step'))
@@ -173,11 +170,9 @@
if not space.is_w(w_step, space.w_None):
step = space.int_w(w_step)
if step != 1:
- raise OperationError(space.w_ValueError,
- space.wrap("no step support"))
+ raise oefmt(space.w_ValueError, "no step support")
if not (0 <= start <= stop <= self.length):
- raise OperationError(space.w_ValueError,
- space.wrap("slice out of bounds"))
+ raise oefmt(space.w_ValueError, "slice out of bounds")
if not self.ll_buffer:
raise segfault_exception(space, "accessing a freed array")
return start, stop
@@ -192,8 +187,7 @@
start, stop = self.decodeslice(space, w_slice)
value = space.str_w(w_value)
if start + len(value) != stop:
- raise OperationError(space.w_ValueError,
- space.wrap("cannot resize array"))
+ raise oefmt(space.w_ValueError, "cannot resize array")
ll_buffer = self.ll_buffer
for i in range(len(value)):
ll_buffer[start + i] = value[i]
diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py
--- a/pypy/module/_rawffi/interp_rawffi.py
+++ b/pypy/module/_rawffi/interp_rawffi.py
@@ -142,8 +142,7 @@
for w_arg in space.unpackiterable(w_argtypes)]
def got_libffi_error(space):
- raise OperationError(space.w_SystemError,
- space.wrap("not supported by libffi"))
+ raise oefmt(space.w_SystemError, "not supported by libffi")
def wrap_dlopenerror(space, e, filename):
if e.msg:
@@ -214,8 +213,8 @@
except LibFFIError:
raise got_libffi_error(space)
else:
- raise OperationError(space.w_TypeError, space.wrap(
- "function name must be string or integer"))
+ raise oefmt(space.w_TypeError,
+ "function name must be string or integer")
w_funcptr = W_FuncPtr(space, ptr, argshapes, resshape)
space.setitem(self.w_cache, w_key, w_funcptr)
@@ -380,7 +379,6 @@
def unwrap_value(space, push_func, add_arg, argdesc, letter, w_arg):
- w = space.wrap
if letter in TYPEMAP_PTR_LETTERS:
# check for NULL ptr
if isinstance(w_arg, W_DataInstance):
@@ -402,15 +400,16 @@
else:
s = space.str_w(w_arg)
if len(s) != 1:
- raise OperationError(space.w_TypeError, w(
- "Expected string of length one as character"))
+ raise oefmt(space.w_TypeError,
+ "Expected string of length one as character")
val = s[0]
push_func(add_arg, argdesc, val)
elif letter == 'u':
s = space.unicode_w(w_arg)
if len(s) != 1:
- raise OperationError(space.w_TypeError, w(
- "Expected unicode string of length one as wide character"))
+ raise oefmt(space.w_TypeError,
+ "Expected unicode string of length one as wide "
+ "character")
val = s[0]
push_func(add_arg, argdesc, val)
else:
@@ -421,8 +420,7 @@
push_func(add_arg, argdesc, val)
return
else:
- raise OperationError(space.w_TypeError,
- space.wrap("cannot directly write value"))
+ raise oefmt(space.w_TypeError, "cannot directly write value")
unwrap_value._annspecialcase_ = 'specialize:arg(1)'
ll_typemap_iter = unrolling_iterable(LL_TYPEMAP.items())
@@ -439,8 +437,7 @@
return space.wrap(float(func(add_arg, argdesc, ll_type)))
else:
return space.wrap(func(add_arg, argdesc, ll_type))
- raise OperationError(space.w_TypeError,
- space.wrap("cannot directly read value"))
+ raise oefmt(space.w_TypeError, "cannot directly read value")
wrap_value._annspecialcase_ = 'specialize:arg(1)'
NARROW_INTEGER_TYPES = 'cbhiBIH?'
@@ -555,8 +552,7 @@
@unwrap_spec(tp_letter=str)
def accessor(space, tp_letter):
if len(tp_letter) != 1:
- raise OperationError(space.w_ValueError, space.wrap(
From pypy.commits at gmail.com Mon May 2 20:52:45 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 17:52:45 -0700 (PDT)
Subject: [pypy-commit] pypy oefmt: close branch before merging
Message-ID: <5727f65d.6614c20a.24e9a.39d3@mx.google.com>
Author: Philip Jenvey
Branch: oefmt
Changeset: r84159:3f627c8633f2
Date: 2016-05-02 17:50 -0700
http://bitbucket.org/pypy/pypy/changeset/3f627c8633f2/
Log: close branch before merging
From pypy.commits at gmail.com Mon May 2 20:52:47 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 17:52:47 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge oefmt, oefmt pypy/module/!(_*)
Message-ID: <5727f65f.cb9a1c0a.aed74.ffffe956@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84160:5a9a3350e29b
Date: 2016-05-02 17:51 -0700
http://bitbucket.org/pypy/pypy/changeset/5a9a3350e29b/
Log: merge oefmt, oefmt pypy/module/!(_*)
diff too long, truncating to 2000 out of 3849 lines
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -19,17 +19,16 @@
@unwrap_spec(typecode=str)
def w_array(space, w_cls, typecode, __args__):
if len(__args__.arguments_w) > 1:
- msg = 'array() takes at most 2 arguments'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "array() takes at most 2 arguments")
if len(typecode) != 1:
- msg = 'array() argument 1 must be char, not str'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "array() argument 1 must be char, not str")
typecode = typecode[0]
if space.is_w(w_cls, space.gettypeobject(W_ArrayBase.typedef)):
if __args__.keywords:
- msg = 'array.array() does not take keyword arguments'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "array.array() does not take keyword arguments")
for tc in unroll_typecodes:
if typecode == tc:
@@ -46,8 +45,9 @@
a.extend(w_initializer, True)
break
else:
- msg = 'bad typecode (must be c, b, B, u, h, H, i, I, l, L, f or d)'
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "bad typecode (must be c, b, B, u, h, H, i, I, l, L, f or "
+ "d)")
return a
@@ -209,8 +209,7 @@
Append items to array from list.
"""
if not space.isinstance_w(w_lst, space.w_list):
- raise OperationError(space.w_TypeError,
- space.wrap("arg must be list"))
+ raise oefmt(space.w_TypeError, "arg must be list")
s = self.len
try:
self.fromsequence(w_lst)
@@ -240,8 +239,8 @@
"""
s = space.getarg_w('s#', w_s)
if len(s) % self.itemsize != 0:
- msg = 'string length not a multiple of item size'
- raise OperationError(self.space.w_ValueError, self.space.wrap(msg))
+ raise oefmt(self.space.w_ValueError,
+ "string length not a multiple of item size")
oldlen = self.len
new = len(s) / self.itemsize
if not new:
@@ -271,8 +270,7 @@
if n != 0:
item = item[0:elems]
self.descr_fromstring(space, space.wrap(item))
- msg = "not enough items in file"
- raise OperationError(space.w_EOFError, space.wrap(msg))
+ raise oefmt(space.w_EOFError, "not enough items in file")
self.descr_fromstring(space, w_item)
@unwrap_spec(w_f=W_File)
@@ -301,8 +299,8 @@
if self.typecode == 'u':
self.fromsequence(w_ustr)
else:
- msg = "fromunicode() may only be called on type 'u' arrays"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "fromunicode() may only be called on type 'u' arrays")
def descr_tounicode(self, space):
""" tounicode() -> unicode
@@ -316,8 +314,8 @@
buf = rffi.cast(UNICODE_ARRAY, self._buffer_as_unsigned())
return space.wrap(rffi.wcharpsize2unicode(buf, self.len))
else:
- msg = "tounicode() may only be called on type 'u' arrays"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "tounicode() may only be called on type 'u' arrays")
def descr_buffer_info(self, space):
""" buffer_info() -> (address, length)
@@ -366,8 +364,8 @@
not 1, 2, 4, or 8 bytes in size, RuntimeError is raised.
"""
if self.itemsize not in [1, 2, 4, 8]:
- msg = "byteswap not supported for this array"
- raise OperationError(space.w_RuntimeError, space.wrap(msg))
+ raise oefmt(space.w_RuntimeError,
+ "byteswap not supported for this array")
if self.len == 0:
return
bytes = self._charbuf_start()
@@ -665,15 +663,13 @@
try:
item = item.touint()
except (ValueError, OverflowError):
- msg = 'unsigned %d-byte integer out of range' % \
- mytype.bytes
- raise OperationError(space.w_OverflowError,
- space.wrap(msg))
+ raise oefmt(space.w_OverflowError,
+ "unsigned %d-byte integer out of range",
+ mytype.bytes)
return rffi.cast(mytype.itemtype, item)
if mytype.unwrap == 'str_w' or mytype.unwrap == 'unicode_w':
if len(item) != 1:
- msg = 'array item must be char'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "array item must be char")
item = item[0]
return rffi.cast(mytype.itemtype, item)
#
@@ -816,8 +812,8 @@
self.setlen(oldlen + i)
elif (not accept_different_array
and isinstance(w_iterable, W_ArrayBase)):
- msg = "can only extend with array of same kind"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "can only extend with array of same kind")
else:
self.fromsequence(w_iterable)
@@ -861,8 +857,7 @@
w_item = self.w_getitem(space, i)
if space.is_true(space.eq(w_item, w_val)):
return space.wrap(i)
- msg = 'array.index(x): x not in list'
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError, "array.index(x): x not in list")
def descr_reverse(self, space):
b = self.buffer
@@ -873,8 +868,7 @@
if i < 0:
i += self.len
if i < 0 or i >= self.len:
- msg = 'pop index out of range'
- raise OperationError(space.w_IndexError, space.wrap(msg))
+ raise oefmt(space.w_IndexError, "pop index out of range")
w_val = self.w_getitem(space, i)
while i < self.len - 1:
self.buffer[i] = self.buffer[i + 1]
@@ -916,16 +910,15 @@
def setitem(self, space, w_idx, w_item):
idx, stop, step = space.decode_index(w_idx, self.len)
if step != 0:
- msg = 'can only assign array to array slice'
- raise OperationError(self.space.w_TypeError,
- self.space.wrap(msg))
+ raise oefmt(self.space.w_TypeError,
+ "can only assign array to array slice")
item = self.item_w(w_item)
self.buffer[idx] = item
def setitem_slice(self, space, w_idx, w_item):
if not isinstance(w_item, W_Array):
- raise OperationError(space.w_TypeError, space.wrap(
- "can only assign to a slice array"))
+ raise oefmt(space.w_TypeError,
+ "can only assign to a slice array")
start, stop, step, size = self.space.decode_index4(w_idx, self.len)
assert step != 0
if w_item.len != size or self is w_item:
diff --git a/pypy/module/binascii/interp_hexlify.py b/pypy/module/binascii/interp_hexlify.py
--- a/pypy/module/binascii/interp_hexlify.py
+++ b/pypy/module/binascii/interp_hexlify.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rarithmetic import ovfcheck
@@ -38,8 +38,7 @@
elif c <= 'f':
if c >= 'a':
return ord(c) - (ord('a')-10)
- raise OperationError(space.w_TypeError,
- space.wrap('Non-hexadecimal digit found'))
+ raise oefmt(space.w_TypeError, "Non-hexadecimal digit found")
_char2value._always_inline_ = True
@unwrap_spec(hexstr='bufferstr')
@@ -48,8 +47,7 @@
hexstr must contain an even number of hex digits (upper or lower case).
This function is also available as "unhexlify()".'''
if len(hexstr) & 1:
- raise OperationError(space.w_TypeError,
- space.wrap('Odd-length string'))
+ raise oefmt(space.w_TypeError, "Odd-length string")
res = StringBuilder(len(hexstr) >> 1)
for i in range(0, len(hexstr), 2):
a = _char2value(space, hexstr[i])
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -154,24 +154,24 @@
def _catch_bz2_error(space, bzerror):
if BZ_CONFIG_ERROR and bzerror == BZ_CONFIG_ERROR:
- raise OperationError(space.w_SystemError,
- space.wrap("the bz2 library was not compiled correctly"))
+ raise oefmt(space.w_SystemError,
+ "the bz2 library was not compiled correctly")
if bzerror == BZ_PARAM_ERROR:
- raise OperationError(space.w_SystemError,
- space.wrap("the bz2 library has received wrong parameters"))
+ raise oefmt(space.w_SystemError,
+ "the bz2 library has received wrong parameters")
elif bzerror == BZ_MEM_ERROR:
raise OperationError(space.w_MemoryError, space.wrap(""))
elif bzerror in (BZ_DATA_ERROR, BZ_DATA_ERROR_MAGIC):
- raise OperationError(space.w_IOError, space.wrap("invalid data stream"))
+ raise oefmt(space.w_IOError, "invalid data stream")
elif bzerror == BZ_IO_ERROR:
- raise OperationError(space.w_IOError, space.wrap("unknown IO error"))
+ raise oefmt(space.w_IOError, "unknown IO error")
elif bzerror == BZ_UNEXPECTED_EOF:
- raise OperationError(space.w_EOFError,
- space.wrap(
- "compressed file ended before the logical end-of-stream was detected"))
+ raise oefmt(space.w_EOFError,
+ "compressed file ended before the logical end-of-stream "
+ "was detected")
elif bzerror == BZ_SEQUENCE_ERROR:
- raise OperationError(space.w_RuntimeError,
- space.wrap("wrong sequence of bz2 library commands used"))
+ raise oefmt(space.w_RuntimeError,
+ "wrong sequence of bz2 library commands used")
def _new_buffer_size(current_size):
# keep doubling until we reach BIGCHUNK; then the buffer size is no
@@ -326,11 +326,9 @@
from rpython.rlib.streamio import construct_stream_tower
os_flags, universal, reading, writing, basemode, binary = decode_mode(mode)
if reading and writing:
- raise OperationError(space.w_ValueError,
- space.wrap("cannot open in read-write mode"))
+ raise oefmt(space.w_ValueError, "cannot open in read-write mode")
if basemode == "a":
- raise OperationError(space.w_ValueError,
- space.wrap("cannot append to bz2 file"))
+ raise oefmt(space.w_ValueError, "cannot append to bz2 file")
stream = open_path_helper(space.str0_w(w_path), os_flags, False)
if reading:
bz2stream = ReadBZ2Filter(space, stream, buffering)
@@ -413,8 +411,9 @@
if raw:
w_result = self.decompressor.decompress(raw)
if self.decompressor.running:
- raise OperationError(self.space.w_EOFError,
- self.space.wrap("compressed file ended before the logical end-of-the-stream was detected"))
+ raise oefmt(self.space.w_EOFError,
+ "compressed file ended before the logical "
+ "end-of-the-stream was detected")
result = self.space.str_w(w_result)
self.readlength += len(result)
else:
@@ -468,8 +467,7 @@
return self.stream.try_to_find_file_descriptor()
def write(self, s):
- raise OperationError(self.space.w_IOError,
- self.space.wrap("file is not ready for writing"))
+ raise oefmt(self.space.w_IOError, "file is not ready for writing")
class WriteBZ2Filter(Stream):
"""Standard I/O stream filter that compresses the stream with bz2."""
@@ -492,16 +490,13 @@
return self.writtenlength
def seek(self, offset, whence):
- raise OperationError(self.space.w_IOError,
- self.space.wrap("seek works only while reading"))
+ raise oefmt(self.space.w_IOError, "seek works only while reading")
def read(self, n):
- raise OperationError(self.space.w_IOError,
- self.space.wrap("file is not ready for reading"))
+ raise oefmt(self.space.w_IOError, "file is not ready for reading")
def readall(self):
- raise OperationError(self.space.w_IOError,
- self.space.wrap("file is not ready for reading"))
+ raise oefmt(self.space.w_IOError, "file is not ready for reading")
def try_to_find_file_descriptor(self):
return self.stream.try_to_find_file_descriptor()
@@ -528,8 +523,8 @@
def _init_bz2comp(self, compresslevel):
if compresslevel < 1 or compresslevel > 9:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("compresslevel must be between 1 and 9"))
+ raise oefmt(self.space.w_ValueError,
+ "compresslevel must be between 1 and 9")
bzerror = intmask(BZ2_bzCompressInit(self.bzs, compresslevel, 0, 0))
if bzerror != BZ_OK:
@@ -556,8 +551,8 @@
return self.space.wrap("")
if not self.running:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("this object was already flushed"))
+ raise oefmt(self.space.w_ValueError,
+ "this object was already flushed")
in_bufsize = datasize
@@ -582,8 +577,8 @@
def flush(self):
if not self.running:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("this object was already flushed"))
+ raise oefmt(self.space.w_ValueError,
+ "this object was already flushed")
self.running = False
with OutBuffer(self.bzs) as out:
@@ -653,8 +648,8 @@
unused_data attribute."""
if not self.running:
- raise OperationError(self.space.w_EOFError,
- self.space.wrap("end of stream was already found"))
+ raise oefmt(self.space.w_EOFError,
+ "end of stream was already found")
if data == '':
return self.space.wrap('')
@@ -705,8 +700,8 @@
given, must be a number between 1 and 9."""
if compresslevel < 1 or compresslevel > 9:
- raise OperationError(space.w_ValueError,
- space.wrap("compresslevel must be between 1 and 9"))
+ raise oefmt(space.w_ValueError,
+ "compresslevel must be between 1 and 9")
with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
in_bufsize = len(data)
@@ -770,8 +765,8 @@
if rffi.getintfield(bzs, 'c_avail_in') == 0:
BZ2_bzDecompressEnd(bzs)
- raise OperationError(space.w_ValueError, space.wrap(
- "couldn't find end of stream"))
+ raise oefmt(space.w_ValueError,
+ "couldn't find end of stream")
elif rffi.getintfield(bzs, 'c_avail_out') == 0:
out.prepare_next_chunk()
diff --git a/pypy/module/cStringIO/interp_stringio.py b/pypy/module/cStringIO/interp_stringio.py
--- a/pypy/module/cStringIO/interp_stringio.py
+++ b/pypy/module/cStringIO/interp_stringio.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec
@@ -19,8 +19,7 @@
def check_closed(self):
if self.is_closed():
space = self.space
- raise OperationError(space.w_ValueError,
- space.wrap("I/O operation on closed file"))
+ raise oefmt(space.w_ValueError, "I/O operation on closed file")
def descr_flush(self):
self.check_closed()
@@ -160,7 +159,7 @@
else:
size = space.int_w(w_size)
if size < 0:
- raise OperationError(space.w_IOError, space.wrap("negative size"))
+ raise oefmt(space.w_IOError, "negative size")
self.truncate(size)
def descr_write(self, space, w_buffer):
diff --git a/pypy/module/cmath/interp_cmath.py b/pypy/module/cmath/interp_cmath.py
--- a/pypy/module/cmath/interp_cmath.py
+++ b/pypy/module/cmath/interp_cmath.py
@@ -1,7 +1,7 @@
import math
from rpython.rlib.objectmodel import specialize
from rpython.tool.sourcetools import func_with_new_name
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cmath import names_and_docstrings
from rpython.rlib import rcomplex
@@ -14,11 +14,9 @@
try:
result = c_func(x, y)
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("math domain error"))
+ raise oefmt(space.w_ValueError, "math domain error")
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("math range error"))
+ raise oefmt(space.w_OverflowError, "math range error")
return result
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -3,7 +3,7 @@
from rpython.rlib.rarithmetic import r_singlefloat
from rpython.tool import leakfinder
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc
@@ -240,8 +240,8 @@
load_reflection_library(space)
except Exception:
if objectmodel.we_are_translated():
- raise OperationError(space.w_ImportError,
- space.wrap("missing reflection library %s" % reflection_library))
+ raise oefmt(space.w_ImportError,
+ "missing reflection library %s", reflection_library)
return False
return True
diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py
--- a/pypy/module/cppyy/converter.py
+++ b/pypy/module/cppyy/converter.py
@@ -100,7 +100,8 @@
return fieldptr
def _is_abstract(self, space):
- raise OperationError(space.w_TypeError, space.wrap("no converter available for '%s'" % self.name))
+ raise oefmt(space.w_TypeError,
+ "no converter available for '%s'", self.name)
def convert_argument(self, space, w_obj, address, call_local):
self._is_abstract(space)
@@ -181,14 +182,15 @@
def convert_argument(self, space, w_obj, address, call_local):
w_tc = space.findattr(w_obj, space.wrap('typecode'))
if w_tc is not None and space.str_w(w_tc) != self.typecode:
- msg = "expected %s pointer type, but received %s" % (self.typecode, space.str_w(w_tc))
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "expected %s pointer type, but received %s",
+ self.typecode, space.str_w(w_tc))
x = rffi.cast(rffi.VOIDPP, address)
try:
x[0] = rffi.cast(rffi.VOIDP, get_rawbuffer(space, w_obj))
except TypeError:
- raise OperationError(space.w_TypeError,
- space.wrap("raw buffer interface not supported"))
+ raise oefmt(space.w_TypeError,
+ "raw buffer interface not supported")
ba = rffi.cast(rffi.CCHARP, address)
ba[capi.c_function_arg_typeoffset(space)] = 'o'
@@ -208,8 +210,8 @@
try:
byteptr[0] = buf.get_raw_address()
except ValueError:
- raise OperationError(space.w_TypeError,
- space.wrap("raw buffer interface not supported"))
+ raise oefmt(space.w_TypeError,
+ "raw buffer interface not supported")
class NumericTypeConverterMixin(object):
@@ -464,8 +466,8 @@
offset = capi.c_base_offset(space, w_obj.cppclass, self.cppclass, rawobject, 1)
obj_address = capi.direct_ptradd(rawobject, offset)
return rffi.cast(capi.C_OBJECT, obj_address)
- raise oefmt(space.w_TypeError, "cannot pass %T as %s",
- w_obj, self.cppclass.name)
+ raise oefmt(space.w_TypeError,
+ "cannot pass %T as %s", w_obj, self.cppclass.name)
def convert_argument(self, space, w_obj, address, call_local):
x = rffi.cast(rffi.VOIDPP, address)
diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py
--- a/pypy/module/cppyy/executor.py
+++ b/pypy/module/cppyy/executor.py
@@ -1,6 +1,6 @@
import sys
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import jit_libffi
@@ -35,8 +35,8 @@
pass
def execute(self, space, cppmethod, cppthis, num_args, args):
- raise OperationError(space.w_TypeError,
- space.wrap('return type not available or supported'))
+ raise oefmt(space.w_TypeError,
+ "return type not available or supported")
def execute_libffi(self, space, cif_descr, funcaddr, buffer):
from pypy.module.cppyy.interp_cppyy import FastCallNotPossible
diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py
--- a/pypy/module/cppyy/ffitypes.py
+++ b/pypy/module/cppyy/ffitypes.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi
from rpython.rlib.rarithmetic import r_singlefloat
@@ -21,8 +21,8 @@
def _unwrap_object(self, space, w_obj):
arg = space.c_int_w(w_obj)
if arg != False and arg != True:
- raise OperationError(space.w_ValueError,
- space.wrap("boolean value should be bool, or integer 1 or 0"))
+ raise oefmt(space.w_ValueError,
+ "boolean value should be bool, or integer 1 or 0")
return arg
def _wrap_object(self, space, obj):
@@ -41,16 +41,15 @@
if space.isinstance_w(w_value, space.w_int):
ival = space.c_int_w(w_value)
if ival < 0 or 256 <= ival:
- raise OperationError(space.w_ValueError,
- space.wrap("char arg not in range(256)"))
+ raise oefmt(space.w_ValueError, "char arg not in range(256)")
value = rffi.cast(rffi.CHAR, space.c_int_w(w_value))
else:
value = space.str_w(w_value)
if len(value) != 1:
- raise OperationError(space.w_ValueError,
- space.wrap("char expected, got string of size %d" % len(value)))
+ raise oefmt(space.w_ValueError,
+ "char expected, got string of size %d", len(value))
return value[0] # turn it into a "char" to the annotator
class ShortTypeMixin(object):
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -1,6 +1,6 @@
import pypy.module.cppyy.capi as capi
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from pypy.interpreter.baseobjspace import W_Root
@@ -195,8 +195,7 @@
args_expected = len(self.arg_defs)
args_given = len(args_w)
if args_expected < args_given or args_given < self.args_required:
- raise OperationError(self.space.w_TypeError,
- self.space.wrap("wrong number of arguments"))
+ raise oefmt(self.space.w_TypeError, "wrong number of arguments")
# initial setup of converters, executors, and libffi (if available)
if self.converters is None:
@@ -435,8 +434,9 @@
s = self.space.str_w(self.space.getattr(args_w[i], self.space.wrap('__name__')))
s = capi.c_resolve_name(self.space, s)
if s != self.templ_args[i]:
- raise OperationError(self.space.w_TypeError, self.space.wrap(
- "non-matching template (got %s where %s expected)" % (s, self.templ_args[i])))
+ raise oefmt(self.space.w_TypeError,
+ "non-matching template (got %s where %s expected)",
+ s, self.templ_args[i])
return W_CPPBoundMethod(cppthis, self)
def bound_call(self, cppthis, args_w):
@@ -646,14 +646,16 @@
def get(self, w_cppinstance, w_pycppclass):
cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True)
if not cppinstance:
- raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance"))
+ raise oefmt(self.space.w_ReferenceError,
+ "attribute access requires an instance")
offset = self._get_offset(cppinstance)
return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset)
def set(self, w_cppinstance, w_value):
cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True)
if not cppinstance:
- raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance"))
+ raise oefmt(self.space.w_ReferenceError,
+ "attribute access requires an instance")
offset = self._get_offset(cppinstance)
self.converter.to_memory(self.space, w_cppinstance, w_value, offset)
return self.space.w_None
@@ -777,12 +779,12 @@
for f in overload.functions:
if 0 < f.signature().find(sig):
return W_CPPOverload(self.space, self, [f])
- raise OperationError(self.space.w_TypeError, self.space.wrap("no overload matches signature"))
+ raise oefmt(self.space.w_TypeError, "no overload matches signature")
def missing_attribute_error(self, name):
- return OperationError(
- self.space.w_AttributeError,
- self.space.wrap("%s '%s' has no attribute %s" % (self.kind, self.name, name)))
+ return oefmt(self.space.w_AttributeError,
+ "%s '%s' has no attribute %s",
+ self.kind, self.name, name)
def __eq__(self, other):
return self.handle == other.handle
@@ -1033,8 +1035,8 @@
def _nullcheck(self):
if not self._rawobject or (self.isref and not self.get_rawobject()):
- raise OperationError(self.space.w_ReferenceError,
- self.space.wrap("trying to access a NULL pointer"))
+ raise oefmt(self.space.w_ReferenceError,
+ "trying to access a NULL pointer")
# allow user to determine ownership rules on a per object level
def fget_python_owns(self, space):
@@ -1072,8 +1074,9 @@
except OperationError as e:
if not e.match(self.space, self.space.w_AttributeError):
raise
- raise OperationError(self.space.w_TypeError,
- self.space.wrap("cannot instantiate abstract class '%s'" % self.cppclass.name))
+ raise oefmt(self.space.w_TypeError,
+ "cannot instantiate abstract class '%s'",
+ self.cppclass.name)
def instance__eq__(self, w_other):
# special case: if other is None, compare pointer-style
@@ -1122,17 +1125,15 @@
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.len(w_as_builtin)
- raise OperationError(
- self.space.w_TypeError,
- self.space.wrap("'%s' has no length" % self.cppclass.name))
+ raise oefmt(self.space.w_TypeError,
+ "'%s' has no length", self.cppclass.name)
def instance__cmp__(self, w_other):
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.cmp(w_as_builtin, w_other)
- raise OperationError(
- self.space.w_AttributeError,
- self.space.wrap("'%s' has no attribute __cmp__" % self.cppclass.name))
+ raise oefmt(self.space.w_AttributeError,
+ "'%s' has no attribute __cmp__", self.cppclass.name)
def instance__repr__(self):
w_as_builtin = self._get_as_builtin()
@@ -1278,7 +1279,7 @@
if not w_cppclass:
w_cppclass = scope_byname(space, space.str_w(w_pycppclass))
if not w_cppclass:
- raise OperationError(space.w_TypeError,
- space.wrap("no such class: %s" % space.str_w(w_pycppclass)))
+ raise oefmt(space.w_TypeError,
+ "no such class: %s", space.str_w(w_pycppclass))
cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False)
return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, python_owns=owns)
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -385,9 +385,8 @@
## arg = from_ref(space,
## rffi.cast(PyObject, input_arg))
## except TypeError, e:
- ## err = OperationError(space.w_TypeError,
- ## space.wrap(
- ## "could not cast arg to PyObject"))
+ ## err = oefmt(space.w_TypeError,
+ ## "could not cast arg to PyObject")
## if not catch_exception:
## raise err
## state = space.fromcache(State)
@@ -1644,11 +1643,13 @@
has_error = PyErr_Occurred(space) is not None
has_result = ret is not None
if has_error and has_result:
- raise OperationError(space.w_SystemError, space.wrap(
- "An exception was set, but function returned a value"))
+ raise oefmt(space.w_SystemError,
+ "An exception was set, but function returned a "
+ "value")
elif not expect_null and not has_error and not has_result:
- raise OperationError(space.w_SystemError, space.wrap(
- "Function returned a NULL result without setting an exception"))
+ raise oefmt(space.w_SystemError,
+ "Function returned a NULL result without setting "
+ "an exception")
if has_error:
state = space.fromcache(State)
diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py
--- a/pypy/module/cpyext/buffer.py
+++ b/pypy/module/cpyext/buffer.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
cpython_api, CANNOT_FAIL, Py_buffer)
@@ -29,8 +29,8 @@
raise an error if the object can't support a simpler view of its memory.
0 is returned on success and -1 on error."""
- raise OperationError(space.w_TypeError, space.wrap(
- 'PyPy does not yet implement the new buffer interface'))
+ raise oefmt(space.w_TypeError,
+ "PyPy does not yet implement the new buffer interface")
@cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL)
def PyBuffer_IsContiguous(space, view, fortran):
diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py
--- a/pypy/module/cpyext/bufferobject.py
+++ b/pypy/module/cpyext/bufferobject.py
@@ -1,6 +1,6 @@
from rpython.rlib.buffer import StringBuffer, SubBuffer
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import (
cpython_api, Py_ssize_t, cpython_struct, bootstrap_function,
PyObjectFields, PyObject)
@@ -61,16 +61,15 @@
py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, buf.array._charbuf_start())
py_buf.c_b_size = buf.getlength()
else:
- raise OperationError(space.w_NotImplementedError, space.wrap(
- "buffer flavor not supported"))
+ raise oefmt(space.w_NotImplementedError, "buffer flavor not supported")
def buffer_realize(space, py_obj):
"""
Creates the buffer in the PyPy interpreter from a cpyext representation.
"""
- raise OperationError(space.w_NotImplementedError, space.wrap(
- "Don't know how to realize a buffer"))
+ raise oefmt(space.w_NotImplementedError,
+ "Don't know how to realize a buffer")
@cpython_api([PyObject], lltype.Void, header=None)
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
cpython_api, cpython_struct, bootstrap_function, build_type_checkers,
@@ -183,8 +183,8 @@
while ref_str.c_buffer[i] != '\0':
i += 1
if i != ref_str.c_ob_size:
- raise OperationError(space.w_TypeError, space.wrap(
- "expected string without null bytes"))
+ raise oefmt(space.w_TypeError,
+ "expected string without null bytes")
return 0
@cpython_api([PyObject], Py_ssize_t, error=-1)
@@ -211,8 +211,8 @@
# XXX always create a new string so far
py_str = rffi.cast(PyStringObject, ref[0])
if not py_str.c_buffer:
- raise OperationError(space.w_SystemError, space.wrap(
- "_PyString_Resize called on already created string"))
+ raise oefmt(space.w_SystemError,
+ "_PyString_Resize called on already created string")
try:
py_newstr = new_empty_str(space, newsize)
except MemoryError:
diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py
--- a/pypy/module/cpyext/complexobject.py
+++ b/pypy/module/cpyext/complexobject.py
@@ -5,7 +5,7 @@
make_typedescr, track_reference, from_ref)
from pypy.module.cpyext.floatobject import PyFloat_AsDouble
from pypy.objspace.std.complexobject import W_ComplexObject
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
PyComplex_Check, PyComplex_CheckExact = build_type_checkers("Complex")
@@ -98,8 +98,8 @@
return 0
if not PyComplex_Check(space, w_obj):
- raise OperationError(space.w_TypeError, space.wrap(
- "__complex__ should return a complex object"))
+ raise oefmt(space.w_TypeError,
+ "__complex__ should return a complex object")
assert isinstance(w_obj, W_ComplexObject)
result.c_real = w_obj.realval
diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py
--- a/pypy/module/cpyext/eval.py
+++ b/pypy/module/cpyext/eval.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.astcompiler import consts
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
@@ -103,8 +103,8 @@
elif start == Py_single_input:
mode = 'single'
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid mode parameter for compilation"))
+ raise oefmt(space.w_ValueError,
+ "invalid mode parameter for compilation")
return compiling.compile(space, w_source, filename, mode, flags)
def run_string(space, source, filename, start, w_globals, w_locals):
diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py
--- a/pypy/module/cpyext/intobject.py
+++ b/pypy/module/cpyext/intobject.py
@@ -1,6 +1,6 @@
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import (
cpython_api, cpython_struct, build_type_checkers, bootstrap_function,
PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t)
@@ -62,8 +62,7 @@
returned, and the caller should check PyErr_Occurred() to find out whether
there was an error, or whether the value just happened to be -1."""
if w_obj is None:
- raise OperationError(space.w_TypeError,
- space.wrap("an integer is required, got NULL"))
+ raise oefmt(space.w_TypeError, "an integer is required, got NULL")
return space.int_w(space.int(w_obj))
@cpython_api([PyObject], lltype.Unsigned, error=-1)
@@ -72,8 +71,7 @@
If pylong is greater than ULONG_MAX, an OverflowError is
raised."""
if w_obj is None:
- raise OperationError(space.w_TypeError,
- space.wrap("an integer is required, got NULL"))
+ raise oefmt(space.w_TypeError, "an integer is required, got NULL")
return space.uint_w(space.int(w_obj))
@@ -118,8 +116,7 @@
Py_ssize_t.
"""
if w_obj is None:
- raise OperationError(space.w_TypeError,
- space.wrap("an integer is required, got NULL"))
+ raise oefmt(space.w_TypeError, "an integer is required, got NULL")
return space.int_w(w_obj) # XXX this is wrong on win64
LONG_MAX = int(LONG_TEST - 1)
diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py
--- a/pypy/module/cpyext/listobject.py
+++ b/pypy/module/cpyext/listobject.py
@@ -5,7 +5,7 @@
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, make_ref
from pypy.objspace.std.listobject import W_ListObject
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
PyList_Check, PyList_CheckExact = build_type_checkers("List")
@@ -52,8 +52,7 @@
if not isinstance(w_list, W_ListObject):
PyErr_BadInternalCall(space)
if index < 0 or index >= w_list.length():
- raise OperationError(space.w_IndexError, space.wrap(
- "list assignment index out of range"))
+ raise oefmt(space.w_IndexError, "list assignment index out of range")
w_list.setitem(index, w_item)
return 0
@@ -66,8 +65,7 @@
if not isinstance(w_list, W_ListObject):
PyErr_BadInternalCall(space)
if index < 0 or index >= w_list.length():
- raise OperationError(space.w_IndexError, space.wrap(
- "list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
w_list.ensure_object_strategy() # make sure we can return a borrowed obj
# XXX ^^^ how does this interact with CPyListStrategy?
w_res = w_list.getitem(index)
@@ -103,8 +101,7 @@
len(list) on a list object.
"""
if not PyList_Check(space, ref):
- raise OperationError(space.w_TypeError,
- space.wrap("expected list object"))
+ raise oefmt(space.w_TypeError, "expected list object")
return PyList_GET_SIZE(space, ref)
@cpython_api([PyObject], PyObject)
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -73,8 +73,8 @@
flags = rffi.cast(lltype.Signed, self.ml.c_ml_flags)
flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST)
if space.is_true(w_kw) and not flags & METH_KEYWORDS:
- raise OperationError(space.w_TypeError, space.wrap(
- self.name + "() takes no keyword arguments"))
+ raise oefmt(space.w_TypeError,
+ "%s() takes no keyword arguments", self.name)
func = rffi.cast(PyCFunction, self.ml.c_ml_meth)
length = space.int_w(space.len(w_args))
@@ -84,8 +84,8 @@
elif flags & METH_NOARGS:
if length == 0:
return generic_cpy_call(space, func, w_self, None)
- raise OperationError(space.w_TypeError, space.wrap(
- self.name + "() takes no arguments"))
+ raise oefmt(space.w_TypeError,
+ "%s() takes no arguments", self.name)
elif flags & METH_O:
if length != 1:
raise oefmt(space.w_TypeError,
@@ -280,7 +280,8 @@
cfunction = space.interp_w(W_PyCFunctionObject, w_obj)
except OperationError as e:
if e.match(space, space.w_TypeError):
- raise oefmt(space.w_SystemError, "bad argument to internal function")
+ raise oefmt(space.w_SystemError,
+ "bad argument to internal function")
raise
return cfunction.ml.c_ml_meth
diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py
--- a/pypy/module/cpyext/modsupport.py
+++ b/pypy/module/cpyext/modsupport.py
@@ -8,7 +8,7 @@
PyMethodDef, PyDescr_NewClassMethod, PyStaticMethod_New)
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
from pypy.module.cpyext.state import State
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
#@cpython_api([rffi.CCHARP], PyObject)
def PyImport_AddModule(space, name):
@@ -87,16 +87,17 @@
if w_type is None:
if flags & METH_CLASS or flags & METH_STATIC:
- raise OperationError(space.w_ValueError,
- space.wrap("module functions cannot set METH_CLASS or METH_STATIC"))
+ raise oefmt(space.w_ValueError,
+ "module functions cannot set METH_CLASS or "
+ "METH_STATIC")
w_obj = space.wrap(W_PyCFunctionObject(space, method, w_self, w_name))
else:
if methodname in dict_w and not (flags & METH_COEXIST):
continue
if flags & METH_CLASS:
if flags & METH_STATIC:
- raise OperationError(space.w_ValueError,
- space.wrap("method cannot be both class and static"))
+ raise oefmt(space.w_ValueError,
+ "method cannot be both class and static")
w_obj = PyDescr_NewClassMethod(space, w_type, method)
elif flags & METH_STATIC:
w_func = PyCFunction_NewEx(space, method, None, None)
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -3,7 +3,7 @@
Numpy C-API for PyPy - S. H. Muller, 2013/07/26
"""
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL
from pypy.module.cpyext.api import PyObject
@@ -126,15 +126,16 @@
parameter is NULL.
"""
if requirements not in (0, ARRAY_DEFAULT):
- raise OperationError(space.w_NotImplementedError, space.wrap(
- '_PyArray_FromAny called with not-implemented requirements argument'))
+ raise oefmt(space.w_NotImplementedError,
+ "_PyArray_FromAny called with not-implemented "
+ "requirements argument")
w_array = array(space, w_obj, w_dtype=w_dtype, copy=False)
if min_depth !=0 and len(w_array.get_shape()) < min_depth:
- raise OperationError(space.w_ValueError, space.wrap(
- 'object of too small depth for desired array'))
+ raise oefmt(space.w_ValueError,
+ "object of too small depth for desired array")
elif max_depth !=0 and len(w_array.get_shape()) > max_depth:
- raise OperationError(space.w_ValueError, space.wrap(
- 'object of too deep for desired array'))
+ raise oefmt(space.w_ValueError,
+ "object of too deep for desired array")
elif w_array.is_scalar():
# since PyArray_DATA() fails on scalars, create a 1D array and set empty
# shape. So the following combination works for *reading* scalars:
@@ -153,25 +154,26 @@
dtype = get_dtype_cache(space).dtypes_by_num[typenum]
return dtype
except KeyError:
- raise OperationError(space.w_ValueError, space.wrap(
- 'PyArray_DescrFromType called with invalid dtype %d' % typenum))
+ raise oefmt(space.w_ValueError,
+ "PyArray_DescrFromType called with invalid dtype %d",
+ typenum)
@cpython_api([PyObject, Py_ssize_t, Py_ssize_t, Py_ssize_t], PyObject, header=HEADER)
def _PyArray_FromObject(space, w_obj, typenum, min_depth, max_depth):
try:
dtype = get_dtype_cache(space).dtypes_by_num[typenum]
except KeyError:
- raise OperationError(space.w_ValueError, space.wrap(
- '_PyArray_FromObject called with invalid dtype %d' % typenum))
+ raise oefmt(space.w_ValueError,
+ "_PyArray_FromObject called with invalid dtype %d",
+ typenum)
try:
return _PyArray_FromAny(space, w_obj, dtype, min_depth, max_depth,
0, NULL);
except OperationError as e:
if e.match(space, space.w_NotImplementedError):
errstr = space.str_w(e.get_w_value(space))
- errstr = '_PyArray_FromObject' + errstr[16:]
- raise OperationError(space.w_NotImplementedError, space.wrap(
- errstr))
+ raise oefmt(space.w_NotImplementedError,
+ "_PyArray_FromObject %s", errstr[16:])
raise
def get_shape_and_dtype(space, nd, dims, typenum):
@@ -214,8 +216,7 @@
rffi.VOIDP, Py_ssize_t, Py_ssize_t, PyObject], PyObject, header=HEADER)
def _PyArray_New(space, subtype, nd, dims, typenum, strides, data, itemsize, flags, obj):
if strides:
- raise OperationError(space.w_NotImplementedError,
- space.wrap("strides must be NULL"))
+ raise oefmt(space.w_NotImplementedError, "strides must be NULL")
order = CORDER if flags & ARRAY_C_CONTIGUOUS else FORTRANORDER
owning = True if flags & ARRAY_OWNDATA else False
diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py
--- a/pypy/module/cpyext/number.py
+++ b/pypy/module/cpyext/number.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, Py_ssize_t
from pypy.module.cpyext.pyobject import PyObject, PyObjectP, from_ref, make_ref, Py_DecRef
from rpython.rtyper.lltypesystem import rffi, lltype
@@ -154,7 +154,8 @@
@cpython_api([PyObject, PyObject, PyObject], PyObject)
def PyNumber_InPlacePower(space, w_o1, w_o2, w_o3):
if not space.is_w(w_o3, space.w_None):
- raise OperationError(space.w_ValueError, space.wrap(
- "PyNumber_InPlacePower with non-None modulus is not supported"))
+ raise oefmt(space.w_ValueError,
+ "PyNumber_InPlacePower with non-None modulus is not "
+ "supported")
return space.inplace_pow(w_o1, w_o2)
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -10,7 +10,7 @@
from pypy.module.cpyext.typeobject import PyTypeObjectPtr
from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall
from pypy.objspace.std.typeobject import W_TypeObject
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
import pypy.module.__builtin__.operation as operation
@@ -382,17 +382,15 @@
try:
w_meth = space.getattr(w_obj, space.wrap('fileno'))
except OperationError:
- raise OperationError(
- space.w_TypeError, space.wrap(
- "argument must be an int, or have a fileno() method."))
+ raise oefmt(space.w_TypeError,
+ "argument must be an int, or have a fileno() method.")
else:
w_fd = space.call_function(w_meth)
fd = space.int_w(w_fd)
if fd < 0:
- raise OperationError(
- space.w_ValueError, space.wrap(
- "file descriptor cannot be a negative integer"))
+ raise oefmt(space.w_ValueError,
+ "file descriptor cannot be a negative integer")
return rffi.cast(rffi.INT_real, fd)
@@ -415,7 +413,7 @@
allowing a type to explicitly indicate to the interpreter that it is not
hashable.
"""
- raise OperationError(space.w_TypeError, space.wrap("unhashable type"))
+ raise oefmt(space.w_TypeError, "unhashable type")
@cpython_api([PyObject], PyObject)
def PyObject_Dir(space, w_o):
@@ -438,12 +436,11 @@
pb = pto.c_tp_as_buffer
if not (pb and pb.c_bf_getreadbuffer and pb.c_bf_getsegcount):
- raise OperationError(space.w_TypeError, space.wrap(
- "expected a character buffer object"))
+ raise oefmt(space.w_TypeError, "expected a character buffer object")
if generic_cpy_call(space, pb.c_bf_getsegcount,
obj, lltype.nullptr(Py_ssize_tP.TO)) != 1:
- raise OperationError(space.w_TypeError, space.wrap(
- "expected a single-segment buffer object"))
+ raise oefmt(space.w_TypeError,
+ "expected a single-segment buffer object")
size = generic_cpy_call(space, pb.c_bf_getcharbuffer,
obj, 0, bufferp)
if size < 0:
@@ -486,9 +483,7 @@
provides a subset of CPython's behavior.
"""
if flags & PyBUF_WRITABLE and readonly:
- raise OperationError(
- space.w_ValueError, space.wrap(
- "Object is not writable"))
+ raise oefmt(space.w_ValueError, "Object is not writable")
view.c_buf = buf
view.c_len = length
view.c_obj = obj
diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py
--- a/pypy/module/cpyext/pyerrors.py
+++ b/pypy/module/cpyext/pyerrors.py
@@ -1,7 +1,7 @@
import os
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter import pytraceback
from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, CONST_STRING
from pypy.module.exceptions.interp_exceptions import W_RuntimeWarning
@@ -110,12 +110,11 @@
argument. It is mostly for internal use. In CPython this function always
raises an exception and returns 0 in all cases, hence the (ab)use of the
error indicator."""
- raise OperationError(space.w_TypeError,
- space.wrap("bad argument type for built-in operation"))
+ raise oefmt(space.w_TypeError, "bad argument type for built-in operation")
@cpython_api([], lltype.Void)
def PyErr_BadInternalCall(space):
- raise OperationError(space.w_SystemError, space.wrap("Bad internal call!"))
+ raise oefmt(space.w_SystemError, "Bad internal call!")
@cpython_api([], PyObject, error=CANNOT_FAIL)
def PyErr_NoMemory(space):
diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py
--- a/pypy/module/cpyext/pystrtod.py
+++ b/pypy/module/cpyext/pystrtod.py
@@ -1,5 +1,5 @@
import errno
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import cpython_api, CONST_STRING
from pypy.module.cpyext.pyobject import PyObject
from rpython.rlib import rdtoa
@@ -63,9 +63,8 @@
endpos = (rffi.cast(rffi.LONG, endptr[0]) -
rffi.cast(rffi.LONG, s))
if endpos == 0 or (not user_endptr and not endptr[0][0] == '\0'):
- raise OperationError(
- space.w_ValueError,
- space.wrap('invalid input at position %s' % endpos))
+ raise oefmt(space.w_ValueError,
+ "invalid input at position %d", endpos)
err = rffi.cast(lltype.Signed, rposix._get_errno())
if err == errno.ERANGE:
rposix._set_errno(rffi.cast(rffi.INT, 0))
@@ -75,8 +74,7 @@
else:
return -rfloat.INFINITY
else:
- raise OperationError(w_overflow_exception,
- space.wrap('value too large'))
+ raise oefmt(w_overflow_exception, "value too large")
return result
finally:
if not user_endptr:
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -63,8 +63,9 @@
return w_obj.getitem(index)
elif isinstance(w_obj, tupleobject.W_TupleObject):
return w_obj.wrappeditems[index]
- raise OperationError(space.w_TypeError, space.wrap(
- 'PySequence_Fast_GET_ITEM called but object is not a list or sequence'))
+ raise oefmt(space.w_TypeError,
+ "PySequence_Fast_GET_ITEM called but object is not a list or "
+ "sequence")
@cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
def PySequence_Fast_GET_SIZE(space, w_obj):
@@ -77,8 +78,9 @@
return w_obj.length()
elif isinstance(w_obj, tupleobject.W_TupleObject):
return len(w_obj.wrappeditems)
- raise OperationError(space.w_TypeError, space.wrap(
- 'PySequence_Fast_GET_SIZE called but object is not a list or sequence'))
+ raise oefmt(space.w_TypeError,
+ "PySequence_Fast_GET_SIZE called but object is not a list or "
+ "sequence")
@cpython_api([PyObject], PyObjectP)
def PySequence_Fast_ITEMS(space, w_obj):
@@ -93,8 +95,9 @@
cpy_strategy = space.fromcache(CPyListStrategy)
if w_obj.strategy is cpy_strategy:
return w_obj.get_raw_items() # asserts it's a cpyext strategy
- raise OperationError(space.w_TypeError, space.wrap(
- 'PySequence_Fast_ITEMS called but object is not the result of PySequence_Fast'))
+ raise oefmt(space.w_TypeError,
+ "PySequence_Fast_ITEMS called but object is not the result of "
+ "PySequence_Fast")
@cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject)
def PySequence_GetSlice(space, w_obj, start, end):
@@ -227,8 +230,7 @@
return idx
idx += 1
- raise OperationError(space.w_ValueError, space.wrap(
- "sequence.index(x): x not in sequence"))
+ raise oefmt(space.w_ValueError, "sequence.index(x): x not in sequence")
class CPyListStrategy(ListStrategy):
erase, unerase = rerased.new_erasing_pair("empty")
@@ -263,8 +265,8 @@
def getslice(self, w_list, start, stop, step, length):
#storage = self.unerase(w_list.lstorage)
- raise OperationError(w_list.space.w_NotImplementedError, w_list.space.wrap(
- "settting a slice of a PySequence_Fast is not supported"))
+ raise oefmt(w_list.space.w_NotImplementedError,
+ "settting a slice of a PySequence_Fast is not supported")
def getitems(self, w_list):
# called when switching list strategy, so convert storage
diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py
--- a/pypy/module/cpyext/setobject.py
+++ b/pypy/module/cpyext/setobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL,
build_type_checkers)
@@ -85,8 +85,7 @@
len(anyset). Raises a PyExc_SystemError if anyset is not a set, frozenset,
or an instance of a subtype."""
if not PySet_Check(space, ref):
- raise OperationError(space.w_TypeError,
- space.wrap("expected set object"))
+ raise oefmt(space.w_TypeError, "expected set object")
return PySet_GET_SIZE(space, ref)
@cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -35,8 +35,8 @@
def check_num_args(space, w_ob, n):
from pypy.module.cpyext.tupleobject import PyTuple_CheckExact
if not PyTuple_CheckExact(space, w_ob):
- raise OperationError(space.w_SystemError,
- space.wrap("PyArg_UnpackTuple() argument list is not a tuple"))
+ raise oefmt(space.w_SystemError,
+ "PyArg_UnpackTuple() argument list is not a tuple")
if n == space.len_w(w_ob):
return
raise oefmt(space.w_TypeError,
@@ -46,8 +46,8 @@
def check_num_argsv(space, w_ob, low, high):
from pypy.module.cpyext.tupleobject import PyTuple_CheckExact
if not PyTuple_CheckExact(space, w_ob):
- raise OperationError(space.w_SystemError,
- space.wrap("PyArg_UnpackTuple() argument list is not a tuple"))
+ raise oefmt(space.w_SystemError,
+ "PyArg_UnpackTuple() argument list is not a tuple")
if low <=space.len_w(w_ob) <= high:
return
raise oefmt(space.w_TypeError,
@@ -183,9 +183,7 @@
if w_type is space.w_None:
w_type = None
if w_obj is None and w_type is None:
- raise OperationError(
- space.w_TypeError,
- space.wrap("__get__(None, None) is invalid"))
+ raise oefmt(space.w_TypeError, "__get__(None, None) is invalid")
return generic_cpy_call(space, func_target, w_self, w_obj, w_type)
def wrap_descr_set(space, w_self, w_args, func):
diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py
--- a/pypy/module/cpyext/state.py
+++ b/pypy/module/cpyext/state.py
@@ -1,6 +1,6 @@
from rpython.rlib.objectmodel import we_are_translated
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.executioncontext import AsyncAction
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.annlowlevel import llhelper
@@ -52,8 +52,9 @@
self.clear_exception()
raise operror
if always:
- raise OperationError(self.space.w_SystemError, self.space.wrap(
- "Function returned an error result without setting an exception"))
+ raise oefmt(self.space.w_SystemError,
+ "Function returned an error result without setting an "
+ "exception")
def build_api(self, space):
"""NOT_RPYTHON
diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py
--- a/pypy/module/cpyext/structmember.py
+++ b/pypy/module/cpyext/structmember.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.structmemberdefs import *
@@ -80,8 +80,7 @@
w_name = space.wrap(rffi.charp2str(w_member.c_name))
raise OperationError(space.w_AttributeError, w_name)
else:
- raise OperationError(space.w_SystemError,
- space.wrap("bad memberdescr type"))
+ raise oefmt(space.w_SystemError, "bad memberdescr type")
return w_result
@@ -95,16 +94,15 @@
if (flags & READONLY or
member_type in [T_STRING, T_STRING_INPLACE]):
- raise OperationError(space.w_TypeError,
- space.wrap("readonly attribute"))
+ raise oefmt(space.w_TypeError, "readonly attribute")
elif w_value is None:
if member_type == T_OBJECT_EX:
if not rffi.cast(PyObjectP, addr)[0]:
w_name = space.wrap(rffi.charp2str(w_member.c_name))
raise OperationError(space.w_AttributeError, w_name)
elif member_type != T_OBJECT:
- raise OperationError(space.w_TypeError,
- space.wrap("can't delete numeric/char attribute"))
+ raise oefmt(space.w_TypeError,
+ "can't delete numeric/char attribute")
for converter in integer_converters:
typ, lltyp, getter = converter
@@ -117,8 +115,7 @@
if member_type == T_CHAR:
str_value = space.str_w(w_value)
if len(str_value) != 1:
- raise OperationError(space.w_TypeError,
- space.wrap("string of length 1 expected"))
+ raise oefmt(space.w_TypeError, "string of length 1 expected")
array = rffi.cast(rffi.CCHARP, addr)
array[0] = str_value[0]
elif member_type in [T_OBJECT, T_OBJECT_EX]:
@@ -127,6 +124,5 @@
Py_DecRef(space, array[0])
array[0] = make_ref(space, w_value)
else:
- raise OperationError(space.w_SystemError,
- space.wrap("bad memberdescr type"))
+ raise oefmt(space.w_SystemError, "bad memberdescr type")
return 0
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py
--- a/pypy/module/cpyext/test/test_sequence.py
+++ b/pypy/module/cpyext/test/test_sequence.py
@@ -163,7 +163,7 @@
assert space.int_w(space.getitem(w_l, space.wrap(1))) == 2
assert space.int_w(space.getitem(w_l, space.wrap(0))) == 1
e = py.test.raises(OperationError, space.getitem, w_l, space.wrap(15))
- assert "list index out of range" in e.exconly()
+ assert "list index out of range" in e.value.errorstr(space)
assert space.int_w(space.getitem(w_l, space.wrap(-1))) == 4
space.setitem(w_l, space.wrap(1), space.wrap(13))
assert space.int_w(space.getitem(w_l, space.wrap(1))) == 13
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.debug import fatalerror_notb
from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL,
@@ -142,8 +142,7 @@
ref = rffi.cast(PyTupleObject, ref)
size = ref.c_ob_size
if index < 0 or index >= size:
- raise OperationError(space.w_IndexError,
- space.wrap("tuple assignment index out of range"))
+ raise oefmt(space.w_IndexError, "tuple assignment index out of range")
old_ref = ref.c_ob_item[index]
ref.c_ob_item[index] = py_obj # consumes a reference
if old_ref:
@@ -158,8 +157,7 @@
ref = rffi.cast(PyTupleObject, ref)
size = ref.c_ob_size
if index < 0 or index >= size:
- raise OperationError(space.w_IndexError,
- space.wrap("tuple index out of range"))
+ raise oefmt(space.w_IndexError, "tuple index out of range")
return ref.c_ob_item[index] # borrowed ref
@cpython_api([PyObject], Py_ssize_t, error=-1)
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -7,7 +7,7 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.baseobjspace import W_Root, DescrMismatch
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.typedef import (GetSetProperty, TypeDef,
interp_attrproperty, interp_attrproperty, interp2app)
from pypy.module.__builtin__.abstractinst import abstract_issubclass_w
@@ -448,8 +448,8 @@
def str_getreadbuffer(space, w_str, segment, ref):
from pypy.module.cpyext.bytesobject import PyString_AsString
if segment != 0:
- raise OperationError(space.w_SystemError, space.wrap
- ("accessing non-existent string segment"))
+ raise oefmt(space.w_SystemError,
+ "accessing non-existent string segment")
pyref = make_ref(space, w_str)
ref[0] = PyString_AsString(space, pyref)
# Stolen reference: the object has better exist somewhere else
@@ -461,8 +461,8 @@
def str_getcharbuffer(space, w_str, segment, ref):
from pypy.module.cpyext.bytesobject import PyString_AsString
if segment != 0:
- raise OperationError(space.w_SystemError, space.wrap
- ("accessing non-existent string segment"))
+ raise oefmt(space.w_SystemError,
+ "accessing non-existent string segment")
pyref = make_ref(space, w_str)
ref[0] = PyString_AsString(space, pyref)
# Stolen reference: the object has better exist somewhere else
@@ -474,8 +474,8 @@
def buf_getreadbuffer(space, pyref, segment, ref):
from pypy.module.cpyext.bufferobject import PyBufferObject
if segment != 0:
- raise OperationError(space.w_SystemError, space.wrap
- ("accessing non-existent string segment"))
+ raise oefmt(space.w_SystemError,
+ "accessing non-existent string segment")
py_buf = rffi.cast(PyBufferObject, pyref)
ref[0] = py_buf.c_b_ptr
#Py_DecRef(space, pyref)
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.unicodedata import unicodedb
from pypy.module.cpyext.api import (
@@ -226,8 +226,7 @@
# Don't use PyUnicode_Check, it will realize the object :-(
w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type))
if not space.is_true(space.issubtype(w_type, space.w_unicode)):
- raise OperationError(space.w_TypeError,
- space.wrap("expected unicode object"))
+ raise oefmt(space.w_TypeError, "expected unicode object")
return PyUnicode_AS_UNICODE(space, ref)
@cpython_api([PyObject], Py_ssize_t, error=-1)
@@ -314,8 +313,8 @@
codec."""
w_str = PyUnicode_AsEncodedObject(space, w_unicode, llencoding, llerrors)
if not PyString_Check(space, w_str):
- raise OperationError(space.w_TypeError, space.wrap(
- "encoder did not return a string object"))
+ raise oefmt(space.w_TypeError,
+ "encoder did not return a string object")
return w_str
@cpython_api([PyObject], PyObject)
@@ -400,8 +399,7 @@
All other objects, including Unicode objects, cause a TypeError to be
set."""
if not encoding:
- raise OperationError(space.w_TypeError,
- space.wrap("decoding Unicode is not supported"))
+ raise oefmt(space.w_TypeError, "decoding Unicode is not supported")
w_encoding = space.wrap(rffi.charp2str(encoding))
if errors:
w_errors = space.wrap(rffi.charp2str(errors))
@@ -420,8 +418,7 @@
raise
w_meth = None
if w_meth is None:
- raise OperationError(space.w_TypeError,
- space.wrap("decoding Unicode is not supported"))
+ raise oefmt(space.w_TypeError, "decoding Unicode is not supported")
return space.call_function(w_meth, w_encoding, w_errors)
@cpython_api([CONST_STRING], PyObject)
@@ -459,8 +456,8 @@
# XXX always create a new string so far
py_uni = rffi.cast(PyUnicodeObject, ref[0])
if not py_uni.c_str:
- raise OperationError(space.w_SystemError, space.wrap(
- "PyUnicode_Resize called on already created string"))
+ raise oefmt(space.w_SystemError,
+ "PyUnicode_Resize called on already created string")
try:
py_newuni = new_empty_unicode(space, newsize)
except MemoryError:
diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py
--- a/pypy/module/exceptions/interp_exceptions.py
+++ b/pypy/module/exceptions/interp_exceptions.py
@@ -76,7 +76,7 @@
from pypy.interpreter.typedef import (TypeDef, GetSetProperty, descr_get_dict,
descr_set_dict, descr_del_dict)
from pypy.interpreter.gateway import interp2app
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rlib import rwin32
@@ -157,7 +157,8 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError, space.wrap("setting exceptions's dictionary to a non-dict"))
+ raise oefmt(space.w_TypeError,
+ "setting exceptions's dictionary to a non-dict")
self.w_dict = w_dict
def descr_reduce(self, space):
@@ -177,8 +178,7 @@
if w_msg is not None:
return w_msg
if self.w_message is None:
- raise OperationError(space.w_AttributeError,
- space.wrap("message was deleted"))
+ raise oefmt(space.w_AttributeError, "message was deleted")
msg = "BaseException.message has been deprecated as of Python 2.6"
space.warn(space.wrap(msg), space.w_DeprecationWarning)
return self.w_message
diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py
--- a/pypy/module/fcntl/interp_fcntl.py
+++ b/pypy/module/fcntl/interp_fcntl.py
@@ -1,6 +1,6 @@
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError, wrap_oserror, oefmt
+from pypy.interpreter.error import OperationError, oefmt, wrap_oserror
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from rpython.rlib import rposix
from rpython.translator.tool.cbuild import ExternalCompilationInfo
@@ -174,8 +174,7 @@
elif op & LOCK_EX:
l_type = F_WRLCK
else:
- raise OperationError(space.w_ValueError,
- space.wrap("unrecognized lock operation"))
+ raise oefmt(space.w_ValueError, "unrecognized lock operation")
op = [F_SETLKW, F_SETLK][int(bool(op & LOCK_NB))]
op = rffi.cast(rffi.INT, op) # C long => C int
@@ -230,9 +229,9 @@
lltype.free(ll_arg, flavor='raw')
if mutate_flag != -1:
- raise OperationError(space.w_TypeError, space.wrap(
- "ioctl requires a file or file descriptor, an integer "
- "and optionally an integer or buffer argument"))
+ raise oefmt(space.w_TypeError,
+ "ioctl requires a file or file descriptor, an integer and "
+ "optionally an integer or buffer argument")
try:
arg = space.getarg_w('s#', w_arg)
diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py
--- a/pypy/module/gc/interp_gc.py
+++ b/pypy/module/gc/interp_gc.py
@@ -1,5 +1,5 @@
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rlib import rgc
@@ -39,8 +39,7 @@
def enable_finalizers(space):
if space.user_del_action.finalizers_lock_count == 0:
- raise OperationError(space.w_ValueError,
- space.wrap("finalizers are already enabled"))
+ raise oefmt(space.w_ValueError, "finalizers are already enabled")
space.user_del_action.finalizers_lock_count -= 1
space.user_del_action.fire()
@@ -53,8 +52,7 @@
def dump_heap_stats(space, filename):
tb = rgc._heap_stats()
if not tb:
- raise OperationError(space.w_RuntimeError,
- space.wrap("Wrong GC"))
+ raise oefmt(space.w_RuntimeError, "Wrong GC")
f = open(filename, mode="w")
for i in range(len(tb)):
f.write("%d %d " % (tb[i].count, tb[i].size))
diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py
--- a/pypy/module/gc/referents.py
+++ b/pypy/module/gc/referents.py
@@ -2,7 +2,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import wrap_oserror, OperationError
+from pypy.interpreter.error import oefmt, wrap_oserror
from rpython.rlib.objectmodel import we_are_translated
@@ -41,8 +41,8 @@
return gcref
def missing_operation(space):
- return OperationError(space.w_NotImplementedError,
- space.wrap("operation not implemented by this GC"))
+ return oefmt(space.w_NotImplementedError,
+ "operation not implemented by this GC")
# ____________________________________________________________
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -156,8 +156,7 @@
except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_ValueError, space.wrap(
- "__package__ set to non-string"))
+ raise oefmt(space.w_ValueError, "__package__ set to non-string")
if ctxt_package is not None:
# __package__ is set, so use it
@@ -167,10 +166,11 @@
dot_position = _get_dot_position(ctxt_package, level - 1)
if dot_position < 0:
if len(ctxt_package) == 0:
- msg = "Attempted relative import in non-package"
+ where = "in non-package"
else:
- msg = "Attempted relative import beyond toplevel package"
- raise OperationError(space.w_ValueError, w(msg))
+ where = "beyond toplevel package"
+ raise oefmt(space.w_ValueError,
+ "Attempted relative import %s", where)
# Try to import parent package
try:
@@ -179,9 +179,9 @@
if not e.match(space, space.w_ImportError):
raise
if level > 0:
- raise OperationError(space.w_SystemError, space.wrap(
- "Parent module '%s' not loaded, "
- "cannot perform relative import" % ctxt_package))
+ raise oefmt(space.w_SystemError,
+ "Parent module '%s' not loaded, cannot perform "
+ "relative import", ctxt_package)
else:
msg = ("Parent module '%s' not found while handling absolute "
"import" % ctxt_package)
@@ -214,8 +214,8 @@
dot_position = _get_dot_position(ctxt_name, m)
if dot_position < 0:
if level > 0:
- msg = "Attempted relative import in non-package"
- raise OperationError(space.w_ValueError, w(msg))
+ raise oefmt(space.w_ValueError,
+ "Attempted relative import in non-package")
rel_modulename = ''
rel_level = 0
else:
@@ -248,9 +248,7 @@
w_locals=None, w_fromlist=None, level=-1):
modulename = name
if not modulename and level < 0:
- raise OperationError(
- space.w_ValueError,
- space.wrap("Empty module name"))
+ raise oefmt(space.w_ValueError, "Empty module name")
w = space.wrap
if w_fromlist is not None and space.is_true(w_fromlist):
@@ -364,8 +362,8 @@
w = space.wrap
if '/' in modulename or '\\' in modulename:
- raise OperationError(space.w_ImportError, space.wrap(
- "Import by filename is not supported."))
+ raise oefmt(space.w_ImportError,
+ "Import by filename is not supported.")
w_mod = None
parts = modulename.split('.')
@@ -461,8 +459,7 @@
@unwrap_spec(path='str0')
def descr_init(self, space, path):
if not path:
- raise OperationError(space.w_ImportError, space.wrap(
- "empty pathname"))
+ raise oefmt(space.w_ImportError, "empty pathname")
# Directory should not exist
try:
@@ -471,8 +468,7 @@
pass
else:
if stat.S_ISDIR(st.st_mode):
- raise OperationError(space.w_ImportError, space.wrap(
- "existing directory"))
+ raise oefmt(space.w_ImportError, "existing directory")
def find_module_w(self, space, __args__):
return space.wrap(None)
@@ -700,9 +696,7 @@
"""Reload the module.
The module must have been successfully imported before."""
if not space.is_w(space.type(w_module), space.type(space.sys)):
- raise OperationError(
- space.w_TypeError,
- space.wrap("reload() argument must be module"))
+ raise oefmt(space.w_TypeError, "reload() argument must be module")
w_modulename = space.getattr(w_module, space.wrap("__name__"))
modulename = space.str0_w(w_modulename)
@@ -806,8 +800,7 @@
if self.lock is None: # CannotHaveLock occurred
return
space = self.space
- raise OperationError(space.w_RuntimeError,
- space.wrap("not holding the import lock"))
+ raise oefmt(space.w_RuntimeError, "not holding the import lock")
assert self.lockcounter > 0
self.lockcounter -= 1
if self.lockcounter == 0:
diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py
--- a/pypy/module/imp/interp_imp.py
+++ b/pypy/module/imp/interp_imp.py
@@ -2,7 +2,7 @@
from pypy.module._file.interp_file import W_File
from rpython.rlib import streamio
from rpython.rlib.streamio import StreamErrors
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from pypy.interpreter.module import Module
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.streamutil import wrap_streamerror
@@ -129,8 +129,7 @@
@unwrap_spec(filename=str)
def load_dynamic(space, w_modulename, filename, w_file=None):
if not importing.has_so_extension(space):
- raise OperationError(space.w_ImportError, space.wrap(
- "Not implemented"))
+ raise oefmt(space.w_ImportError, "Not implemented")
importing.load_c_extension(space, filename, space.str_w(w_modulename))
return importing.check_sys_modules(space, w_modulename)
@@ -142,9 +141,8 @@
if name not in space.builtin_modules:
return
if space.finditem(space.sys.get('modules'), w_name) is not None:
- raise OperationError(
- space.w_ImportError,
- space.wrap("cannot initialize a built-in module twice in PyPy"))
+ raise oefmt(space.w_ImportError,
+ "cannot initialize a built-in module twice in PyPy")
return space.getbuiltinmodule(name)
def init_frozen(space, w_name):
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
--- a/pypy/module/itertools/interp_itertools.py
+++ b/pypy/module/itertools/interp_itertools.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, make_weakref_descr
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from rpython.rlib import jit
@@ -46,8 +46,7 @@
def check_number(space, w_obj):
if (space.lookup(w_obj, '__int__') is None and
space.lookup(w_obj, '__float__') is None):
- raise OperationError(space.w_TypeError,
- space.wrap("expected a number"))
+ raise oefmt(space.w_TypeError, "expected a number")
@unwrap_spec(w_start=WrappedDefault(0), w_step=WrappedDefault(1))
def W_Count___new__(space, w_subtype, w_start, w_step):
@@ -346,7 +345,9 @@
"Indicies for islice() must be None or non-negative integers")
w_stop = args_w[0]
else:
- raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)"))
+ raise oefmt(space.w_TypeError,
+ "islice() takes at most 4 arguments (%d given)",
+ num_args)
if space.is_w(w_stop, space.w_None):
stop = -1
@@ -540,7 +541,9 @@
iterator_w = space.iter(iterable_w)
except OperationError as e:
if e.match(self.space, self.space.w_TypeError):
- raise OperationError(space.w_TypeError, space.wrap(self._error_name + " argument #" + str(i + 1) + " must support iteration"))
+ raise oefmt(space.w_TypeError,
+ "%s argument #%d must support iteration",
+ self._error_name, i + 1)
else:
raise
else:
@@ -577,8 +580,8 @@
def W_IMap___new__(space, w_subtype, w_fun, args_w):
if len(args_w) == 0:
- raise OperationError(space.w_TypeError,
- space.wrap("imap() must have at least two arguments"))
+ raise oefmt(space.w_TypeError,
+ "imap() must have at least two arguments")
r = space.allocate_instance(W_IMap, w_subtype)
r.__init__(space, w_fun, args_w)
return space.wrap(r)
@@ -690,8 +693,8 @@
w_fillvalue = kwds_w["fillvalue"]
del kwds_w["fillvalue"]
if kwds_w:
- raise OperationError(space.w_TypeError, space.wrap(
- "izip_longest() got unexpected keyword argument(s)"))
+ raise oefmt(space.w_TypeError,
+ "izip_longest() got unexpected keyword argument(s)")
self = space.allocate_instance(W_IZipLongest, w_subtype)
self.__init__(space, space.w_None, arguments_w)
@@ -847,7 +850,7 @@
return tuple([gen(it.next) for i in range(n)])
"""
if n < 0:
- raise OperationError(space.w_ValueError, space.wrap("n must be >= 0"))
+ raise oefmt(space.w_ValueError, "n must be >= 0")
if isinstance(w_iterable, W_TeeIterable): # optimization only
chained_list = w_iterable.chained_list
@@ -1167,8 +1170,8 @@
w_repeat = kwds_w['repeat']
del kwds_w['repeat']
if kwds_w:
- raise OperationError(space.w_TypeError, space.wrap(
- "product() got unexpected keyword argument(s)"))
+ raise oefmt(space.w_TypeError,
+ "product() got unexpected keyword argument(s)")
r = space.allocate_instance(W_Product, w_subtype)
r.__init__(space, arguments_w, w_repeat)
@@ -1270,9 +1273,7 @@
def W_Combinations__new__(space, w_subtype, w_iterable, r):
pool_w = space.fixedview(w_iterable)
if r < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("r must be non-negative")
- )
+ raise oefmt(space.w_ValueError, "r must be non-negative")
indices = range(len(pool_w))
res = space.allocate_instance(W_Combinations, w_subtype)
res.__init__(space, pool_w, indices, r)
@@ -1305,8 +1306,7 @@
def W_CombinationsWithReplacement__new__(space, w_subtype, w_iterable, r):
pool_w = space.fixedview(w_iterable)
if r < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("r must be non-negative"))
+ raise oefmt(space.w_ValueError, "r must be non-negative")
indices = [0] * r
res = space.allocate_instance(W_CombinationsWithReplacement, w_subtype)
res.__init__(space, pool_w, indices, r)
diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py
--- a/pypy/module/marshal/interp_marshal.py
+++ b/pypy/module/marshal/interp_marshal.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import WrappedDefault, unwrap_spec
from rpython.rlib.rarithmetic import intmask
from rpython.rlib import rstackovf
@@ -60,8 +60,7 @@
def raise_eof(self):
space = self.space
- raise OperationError(space.w_EOFError, space.wrap(
- 'EOF read where object expected'))
+ raise oefmt(space.w_EOFError, "EOF read where object expected")
def finished(self):
pass
@@ -81,8 +80,8 @@
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- 'marshal.dump() 2nd arg must be file-like object'))
+ raise oefmt(space.w_TypeError,
+ "marshal.dump() 2nd arg must be file-like object")
def write(self, data):
space = self.space
@@ -98,8 +97,8 @@
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- 'marshal.load() arg must be file-like object'))
+ raise oefmt(space.w_TypeError,
+ "marshal.load() arg must be file-like object")
def read(self, n):
space = self.space
@@ -416,8 +415,7 @@
tc = self.get1()
w_ret = self._dispatch[ord(tc)](space, self, tc)
if w_ret is None and not allow_null:
- raise OperationError(space.w_TypeError, space.wrap(
- 'NULL object in marshal data'))
From pypy.commits at gmail.com Mon May 2 21:13:36 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 18:13:36 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: merge default (oefmt pypy/module/!(_*))
Message-ID: <5727fb40.a553c20a.2fb9d.3c50@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84161:5067460e27d9
Date: 2016-05-02 18:12 -0700
http://bitbucket.org/pypy/pypy/changeset/5067460e27d9/
Log: merge default (oefmt pypy/module/!(_*))
diff too long, truncating to 2000 out of 3288 lines
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -18,17 +18,16 @@
@unwrap_spec(typecode=str)
def w_array(space, w_cls, typecode, __args__):
if len(__args__.arguments_w) > 1:
- msg = 'array() takes at most 2 arguments'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "array() takes at most 2 arguments")
if len(typecode) != 1:
- msg = 'array() argument 1 must be char, not str'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "array() argument 1 must be char, not str")
typecode = typecode[0]
if space.is_w(w_cls, space.gettypeobject(W_ArrayBase.typedef)):
if __args__.keywords:
- msg = 'array.array() does not take keyword arguments'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "array.array() does not take keyword arguments")
for tc in unroll_typecodes:
if typecode == tc:
@@ -52,8 +51,9 @@
a.descr_frombytes(space, buf)
break
else:
- msg = 'bad typecode (must be b, B, u, h, H, i, I, l, L, q, Q, f or d)'
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "bad typecode (must be b, B, u, h, H, i, I, l, L, q, Q, f "
+ "or d)")
return a
@@ -214,8 +214,7 @@
Append items to array from list.
"""
if not space.isinstance_w(w_lst, space.w_list):
- raise OperationError(space.w_TypeError,
- space.wrap("arg must be list"))
+ raise oefmt(space.w_TypeError, "arg must be list")
s = self.len
try:
self.fromsequence(w_lst)
@@ -272,8 +271,8 @@
fromfile() method).
"""
if len(s) % self.itemsize != 0:
- msg = 'string length not a multiple of item size'
- raise OperationError(self.space.w_ValueError, self.space.wrap(msg))
+ raise oefmt(self.space.w_ValueError,
+ "string length not a multiple of item size")
oldlen = self.len
new = len(s) / self.itemsize
if not new:
@@ -303,8 +302,7 @@
if n != 0:
item = item[0:elems]
self.descr_frombytes(space, item)
- msg = "not enough items in file"
- raise OperationError(space.w_EOFError, space.wrap(msg))
+ raise oefmt(space.w_EOFError, "not enough items in file")
self.descr_fromstring(space, w_item)
def descr_tofile(self, space, w_f):
@@ -332,8 +330,8 @@
if self.typecode == 'u':
self.fromsequence(w_ustr)
else:
- msg = "fromunicode() may only be called on type 'u' arrays"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "fromunicode() may only be called on type 'u' arrays")
def descr_tounicode(self, space):
""" tounicode() -> unicode
@@ -347,8 +345,8 @@
buf = rffi.cast(UNICODE_ARRAY, self._buffer_as_unsigned())
return space.wrap(rffi.wcharpsize2unicode(buf, self.len))
else:
- msg = "tounicode() may only be called on type 'u' arrays"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "tounicode() may only be called on type 'u' arrays")
def descr_buffer_info(self, space):
""" buffer_info() -> (address, length)
@@ -420,8 +418,8 @@
not 1, 2, 4, or 8 bytes in size, RuntimeError is raised.
"""
if self.itemsize not in [1, 2, 4, 8]:
- msg = "byteswap not supported for this array"
- raise OperationError(space.w_RuntimeError, space.wrap(msg))
+ raise oefmt(space.w_RuntimeError,
+ "byteswap not supported for this array")
if self.len == 0:
return
bytes = self._charbuf_start()
@@ -704,15 +702,13 @@
try:
item = getattr(item, mytype.convert)()
except (ValueError, OverflowError):
- msg = 'unsigned %d-byte integer out of range' % \
- mytype.bytes
- raise OperationError(space.w_OverflowError,
- space.wrap(msg))
+ raise oefmt(space.w_OverflowError,
+ "unsigned %d-byte integer out of range",
+ mytype.bytes)
return rffi.cast(mytype.itemtype, item)
if mytype.unwrap == 'str_w' or mytype.unwrap == 'unicode_w':
if len(item) != 1:
- msg = 'array item must be char'
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "array item must be char")
item = item[0]
return rffi.cast(mytype.itemtype, item)
#
@@ -855,8 +851,8 @@
self.setlen(oldlen + i)
elif (not accept_different_array
and isinstance(w_iterable, W_ArrayBase)):
- msg = "can only extend with array of same kind"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "can only extend with array of same kind")
else:
self.fromsequence(w_iterable)
@@ -900,8 +896,7 @@
w_item = self.w_getitem(space, i)
if space.is_true(space.eq(w_item, w_val)):
return space.wrap(i)
- msg = 'array.index(x): x not in list'
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError, "array.index(x): x not in list")
def descr_reverse(self, space):
b = self.buffer
@@ -912,8 +907,7 @@
if i < 0:
i += self.len
if i < 0 or i >= self.len:
- msg = 'pop index out of range'
- raise OperationError(space.w_IndexError, space.wrap(msg))
+ raise oefmt(space.w_IndexError, "pop index out of range")
w_val = self.w_getitem(space, i)
while i < self.len - 1:
self.buffer[i] = self.buffer[i + 1]
@@ -955,16 +949,15 @@
def setitem(self, space, w_idx, w_item):
idx, stop, step = space.decode_index(w_idx, self.len)
if step != 0:
- msg = 'can only assign array to array slice'
- raise OperationError(self.space.w_TypeError,
- self.space.wrap(msg))
+ raise oefmt(self.space.w_TypeError,
+ "can only assign array to array slice")
item = self.item_w(w_item)
self.buffer[idx] = item
def setitem_slice(self, space, w_idx, w_item):
if not isinstance(w_item, W_Array):
- raise OperationError(space.w_TypeError, space.wrap(
- "can only assign to a slice array"))
+ raise oefmt(space.w_TypeError,
+ "can only assign to a slice array")
start, stop, step, size = self.space.decode_index4(w_idx, self.len)
assert step != 0
if w_item.len != size or self is w_item:
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -154,24 +154,24 @@
def _catch_bz2_error(space, bzerror):
if BZ_CONFIG_ERROR and bzerror == BZ_CONFIG_ERROR:
- raise OperationError(space.w_SystemError,
- space.wrap("the bz2 library was not compiled correctly"))
+ raise oefmt(space.w_SystemError,
+ "the bz2 library was not compiled correctly")
if bzerror == BZ_PARAM_ERROR:
- raise OperationError(space.w_SystemError,
- space.wrap("the bz2 library has received wrong parameters"))
+ raise oefmt(space.w_SystemError,
+ "the bz2 library has received wrong parameters")
elif bzerror == BZ_MEM_ERROR:
raise OperationError(space.w_MemoryError, space.wrap(""))
elif bzerror in (BZ_DATA_ERROR, BZ_DATA_ERROR_MAGIC):
- raise OperationError(space.w_IOError, space.wrap("invalid data stream"))
+ raise oefmt(space.w_IOError, "invalid data stream")
elif bzerror == BZ_IO_ERROR:
- raise OperationError(space.w_IOError, space.wrap("unknown IO error"))
+ raise oefmt(space.w_IOError, "unknown IO error")
elif bzerror == BZ_UNEXPECTED_EOF:
- raise OperationError(space.w_EOFError,
- space.wrap(
- "compressed file ended before the logical end-of-stream was detected"))
+ raise oefmt(space.w_EOFError,
+ "compressed file ended before the logical end-of-stream "
+ "was detected")
elif bzerror == BZ_SEQUENCE_ERROR:
- raise OperationError(space.w_RuntimeError,
- space.wrap("wrong sequence of bz2 library commands used"))
+ raise oefmt(space.w_RuntimeError,
+ "wrong sequence of bz2 library commands used")
def _new_buffer_size(current_size):
# keep doubling until we reach BIGCHUNK; then the buffer size is no
@@ -258,8 +258,8 @@
def _init_bz2comp(self, compresslevel):
if compresslevel < 1 or compresslevel > 9:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("compresslevel must be between 1 and 9"))
+ raise oefmt(self.space.w_ValueError,
+ "compresslevel must be between 1 and 9")
bzerror = intmask(BZ2_bzCompressInit(self.bzs, compresslevel, 0, 0))
if bzerror != BZ_OK:
@@ -289,8 +289,8 @@
return self.space.wrapbytes("")
if not self.running:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("this object was already flushed"))
+ raise oefmt(self.space.w_ValueError,
+ "this object was already flushed")
in_bufsize = datasize
@@ -315,8 +315,8 @@
def flush(self):
if not self.running:
- raise OperationError(self.space.w_ValueError,
- self.space.wrap("this object was already flushed"))
+ raise oefmt(self.space.w_ValueError,
+ "this object was already flushed")
self.running = False
with OutBuffer(self.bzs) as out:
@@ -396,8 +396,8 @@
unused_data attribute."""
if not self.running:
- raise OperationError(self.space.w_EOFError,
- self.space.wrap("end of stream was already found"))
+ raise oefmt(self.space.w_EOFError,
+ "end of stream was already found")
if data == '':
return self.space.wrapbytes('')
diff --git a/pypy/module/cmath/interp_cmath.py b/pypy/module/cmath/interp_cmath.py
--- a/pypy/module/cmath/interp_cmath.py
+++ b/pypy/module/cmath/interp_cmath.py
@@ -1,7 +1,7 @@
import math
from rpython.rlib.objectmodel import specialize
from rpython.tool.sourcetools import func_with_new_name
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cmath import names_and_docstrings
from rpython.rlib import rcomplex
@@ -13,11 +13,9 @@
try:
result = c_func(x, y)
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("math domain error"))
+ raise oefmt(space.w_ValueError, "math domain error")
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("math range error"))
+ raise oefmt(space.w_OverflowError, "math range error")
return result
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -3,7 +3,7 @@
from rpython.rlib.rarithmetic import r_singlefloat
from rpython.tool import leakfinder
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc
@@ -240,8 +240,8 @@
load_reflection_library(space)
except Exception:
if objectmodel.we_are_translated():
- raise OperationError(space.w_ImportError,
- space.wrap("missing reflection library %s" % reflection_library))
+ raise oefmt(space.w_ImportError,
+ "missing reflection library %s", reflection_library)
return False
return True
diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py
--- a/pypy/module/cppyy/converter.py
+++ b/pypy/module/cppyy/converter.py
@@ -100,7 +100,8 @@
return fieldptr
def _is_abstract(self, space):
- raise OperationError(space.w_TypeError, space.wrap("no converter available for '%s'" % self.name))
+ raise oefmt(space.w_TypeError,
+ "no converter available for '%s'", self.name)
def convert_argument(self, space, w_obj, address, call_local):
self._is_abstract(space)
@@ -181,14 +182,15 @@
def convert_argument(self, space, w_obj, address, call_local):
w_tc = space.findattr(w_obj, space.wrap('typecode'))
if w_tc is not None and space.str_w(w_tc) != self.typecode:
- msg = "expected %s pointer type, but received %s" % (self.typecode, space.str_w(w_tc))
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "expected %s pointer type, but received %s",
+ self.typecode, space.str_w(w_tc))
x = rffi.cast(rffi.VOIDPP, address)
try:
x[0] = rffi.cast(rffi.VOIDP, get_rawbuffer(space, w_obj))
except TypeError:
- raise OperationError(space.w_TypeError,
- space.wrap("raw buffer interface not supported"))
+ raise oefmt(space.w_TypeError,
+ "raw buffer interface not supported")
ba = rffi.cast(rffi.CCHARP, address)
ba[capi.c_function_arg_typeoffset(space)] = 'o'
@@ -208,8 +210,8 @@
try:
byteptr[0] = buf.get_raw_address()
except ValueError:
- raise OperationError(space.w_TypeError,
- space.wrap("raw buffer interface not supported"))
+ raise oefmt(space.w_TypeError,
+ "raw buffer interface not supported")
class NumericTypeConverterMixin(object):
@@ -464,8 +466,8 @@
offset = capi.c_base_offset(space, w_obj.cppclass, self.cppclass, rawobject, 1)
obj_address = capi.direct_ptradd(rawobject, offset)
return rffi.cast(capi.C_OBJECT, obj_address)
- raise oefmt(space.w_TypeError, "cannot pass %T as %s",
- w_obj, self.cppclass.name)
+ raise oefmt(space.w_TypeError,
+ "cannot pass %T as %s", w_obj, self.cppclass.name)
def convert_argument(self, space, w_obj, address, call_local):
x = rffi.cast(rffi.VOIDPP, address)
diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py
--- a/pypy/module/cppyy/executor.py
+++ b/pypy/module/cppyy/executor.py
@@ -1,6 +1,6 @@
import sys
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import jit_libffi
@@ -35,8 +35,8 @@
pass
def execute(self, space, cppmethod, cppthis, num_args, args):
- raise OperationError(space.w_TypeError,
- space.wrap('return type not available or supported'))
+ raise oefmt(space.w_TypeError,
+ "return type not available or supported")
def execute_libffi(self, space, cif_descr, funcaddr, buffer):
from pypy.module.cppyy.interp_cppyy import FastCallNotPossible
diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py
--- a/pypy/module/cppyy/ffitypes.py
+++ b/pypy/module/cppyy/ffitypes.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi
from rpython.rlib.rarithmetic import r_singlefloat
@@ -21,8 +21,8 @@
def _unwrap_object(self, space, w_obj):
arg = space.c_int_w(w_obj)
if arg != False and arg != True:
- raise OperationError(space.w_ValueError,
- space.wrap("boolean value should be bool, or integer 1 or 0"))
+ raise oefmt(space.w_ValueError,
+ "boolean value should be bool, or integer 1 or 0")
return arg
def _wrap_object(self, space, obj):
@@ -41,16 +41,15 @@
if space.isinstance_w(w_value, space.w_int):
ival = space.c_int_w(w_value)
if ival < 0 or 256 <= ival:
- raise OperationError(space.w_ValueError,
- space.wrap("char arg not in range(256)"))
+ raise oefmt(space.w_ValueError, "char arg not in range(256)")
value = rffi.cast(rffi.CHAR, space.c_int_w(w_value))
else:
value = space.str_w(w_value)
if len(value) != 1:
- raise OperationError(space.w_ValueError,
- space.wrap("char expected, got string of size %d" % len(value)))
+ raise oefmt(space.w_ValueError,
+ "char expected, got string of size %d", len(value))
return value[0] # turn it into a "char" to the annotator
class ShortTypeMixin(object):
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -1,6 +1,6 @@
import pypy.module.cppyy.capi as capi
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from pypy.interpreter.baseobjspace import W_Root
@@ -195,8 +195,7 @@
args_expected = len(self.arg_defs)
args_given = len(args_w)
if args_expected < args_given or args_given < self.args_required:
- raise OperationError(self.space.w_TypeError,
- self.space.wrap("wrong number of arguments"))
+ raise oefmt(self.space.w_TypeError, "wrong number of arguments")
# initial setup of converters, executors, and libffi (if available)
if self.converters is None:
@@ -435,8 +434,9 @@
s = self.space.str_w(self.space.getattr(args_w[i], self.space.wrap('__name__')))
s = capi.c_resolve_name(self.space, s)
if s != self.templ_args[i]:
- raise OperationError(self.space.w_TypeError, self.space.wrap(
- "non-matching template (got %s where %s expected)" % (s, self.templ_args[i])))
+ raise oefmt(self.space.w_TypeError,
+ "non-matching template (got %s where %s expected)",
+ s, self.templ_args[i])
return W_CPPBoundMethod(cppthis, self)
def bound_call(self, cppthis, args_w):
@@ -646,14 +646,16 @@
def get(self, w_cppinstance, w_pycppclass):
cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True)
if not cppinstance:
- raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance"))
+ raise oefmt(self.space.w_ReferenceError,
+ "attribute access requires an instance")
offset = self._get_offset(cppinstance)
return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset)
def set(self, w_cppinstance, w_value):
cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True)
if not cppinstance:
- raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance"))
+ raise oefmt(self.space.w_ReferenceError,
+ "attribute access requires an instance")
offset = self._get_offset(cppinstance)
self.converter.to_memory(self.space, w_cppinstance, w_value, offset)
return self.space.w_None
@@ -777,12 +779,12 @@
for f in overload.functions:
if 0 < f.signature().find(sig):
return W_CPPOverload(self.space, self, [f])
- raise OperationError(self.space.w_TypeError, self.space.wrap("no overload matches signature"))
+ raise oefmt(self.space.w_TypeError, "no overload matches signature")
def missing_attribute_error(self, name):
- return OperationError(
- self.space.w_AttributeError,
- self.space.wrap("%s '%s' has no attribute %s" % (self.kind, self.name, name)))
+ return oefmt(self.space.w_AttributeError,
+ "%s '%s' has no attribute %s",
+ self.kind, self.name, name)
def __eq__(self, other):
return self.handle == other.handle
@@ -1033,8 +1035,8 @@
def _nullcheck(self):
if not self._rawobject or (self.isref and not self.get_rawobject()):
- raise OperationError(self.space.w_ReferenceError,
- self.space.wrap("trying to access a NULL pointer"))
+ raise oefmt(self.space.w_ReferenceError,
+ "trying to access a NULL pointer")
# allow user to determine ownership rules on a per object level
def fget_python_owns(self, space):
@@ -1072,8 +1074,9 @@
except OperationError as e:
if not e.match(self.space, self.space.w_AttributeError):
raise
- raise OperationError(self.space.w_TypeError,
- self.space.wrap("cannot instantiate abstract class '%s'" % self.cppclass.name))
+ raise oefmt(self.space.w_TypeError,
+ "cannot instantiate abstract class '%s'",
+ self.cppclass.name)
def instance__eq__(self, w_other):
# special case: if other is None, compare pointer-style
@@ -1122,17 +1125,15 @@
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.len(w_as_builtin)
- raise OperationError(
- self.space.w_TypeError,
- self.space.wrap("'%s' has no length" % self.cppclass.name))
+ raise oefmt(self.space.w_TypeError,
+ "'%s' has no length", self.cppclass.name)
def instance__cmp__(self, w_other):
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.cmp(w_as_builtin, w_other)
- raise OperationError(
- self.space.w_AttributeError,
- self.space.wrap("'%s' has no attribute __cmp__" % self.cppclass.name))
+ raise oefmt(self.space.w_AttributeError,
+ "'%s' has no attribute __cmp__", self.cppclass.name)
def instance__repr__(self):
w_as_builtin = self._get_as_builtin()
@@ -1278,7 +1279,7 @@
if not w_cppclass:
w_cppclass = scope_byname(space, space.str_w(w_pycppclass))
if not w_cppclass:
- raise OperationError(space.w_TypeError,
- space.wrap("no such class: %s" % space.str_w(w_pycppclass)))
+ raise oefmt(space.w_TypeError,
+ "no such class: %s", space.str_w(w_pycppclass))
cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False)
return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, python_owns=owns)
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -388,9 +388,8 @@
## arg = from_ref(space,
## rffi.cast(PyObject, input_arg))
## except TypeError, e:
- ## err = OperationError(space.w_TypeError,
- ## space.wrap(
- ## "could not cast arg to PyObject"))
+ ## err = oefmt(space.w_TypeError,
+ ## "could not cast arg to PyObject")
## if not catch_exception:
## raise err
## state = space.fromcache(State)
@@ -1648,11 +1647,13 @@
has_error = PyErr_Occurred(space) is not None
has_result = ret is not None
if has_error and has_result:
- raise OperationError(space.w_SystemError, space.wrap(
- "An exception was set, but function returned a value"))
+ raise oefmt(space.w_SystemError,
+ "An exception was set, but function returned a "
+ "value")
elif not expect_null and not has_error and not has_result:
- raise OperationError(space.w_SystemError, space.wrap(
- "Function returned a NULL result without setting an exception"))
+ raise oefmt(space.w_SystemError,
+ "Function returned a NULL result without setting "
+ "an exception")
if has_error:
state = space.fromcache(State)
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
cpython_api, cpython_struct, bootstrap_function, build_type_checkers,
@@ -171,8 +171,8 @@
while ref_str.c_buffer[i] != '\0':
i += 1
if i != ref_str.c_ob_size:
- raise OperationError(space.w_TypeError, space.wrap(
- "expected string without null bytes"))
+ raise oefmt(space.w_TypeError,
+ "expected string without null bytes")
return 0
@cpython_api([PyObject], Py_ssize_t, error=-1)
@@ -199,8 +199,8 @@
# XXX always create a new string so far
py_str = rffi.cast(PyBytesObject, ref[0])
if not py_str.c_buffer:
- raise OperationError(space.w_SystemError, space.wrap(
- "_PyBytes_Resize called on already created string"))
+ raise oefmt(space.w_SystemError,
+ "_PyBytes_Resize called on already created string")
try:
py_newstr = new_empty_str(space, newsize)
except MemoryError:
diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py
--- a/pypy/module/cpyext/complexobject.py
+++ b/pypy/module/cpyext/complexobject.py
@@ -5,7 +5,7 @@
make_typedescr, track_reference, from_ref)
from pypy.module.cpyext.floatobject import PyFloat_AsDouble
from pypy.objspace.std.complexobject import W_ComplexObject
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
PyComplex_Check, PyComplex_CheckExact = build_type_checkers("Complex")
@@ -98,8 +98,8 @@
return 0
if not PyComplex_Check(space, w_obj):
- raise OperationError(space.w_TypeError, space.wrap(
- "__complex__ should return a complex object"))
+ raise oefmt(space.w_TypeError,
+ "__complex__ should return a complex object")
assert isinstance(w_obj, W_ComplexObject)
result.c_real = w_obj.realval
diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py
--- a/pypy/module/cpyext/eval.py
+++ b/pypy/module/cpyext/eval.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.astcompiler import consts
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
@@ -103,8 +103,8 @@
elif start == Py_single_input:
mode = 'single'
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid mode parameter for compilation"))
+ raise oefmt(space.w_ValueError,
+ "invalid mode parameter for compilation")
return compiling.compile(space, w_source, filename, mode, flags)
def run_string(space, source, filename, start, w_globals, w_locals):
diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py
--- a/pypy/module/cpyext/listobject.py
+++ b/pypy/module/cpyext/listobject.py
@@ -5,7 +5,7 @@
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, make_ref
from pypy.objspace.std.listobject import W_ListObject
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
PyList_Check, PyList_CheckExact = build_type_checkers("List")
@@ -52,8 +52,7 @@
if not isinstance(w_list, W_ListObject):
PyErr_BadInternalCall(space)
if index < 0 or index >= w_list.length():
- raise OperationError(space.w_IndexError, space.wrap(
- "list assignment index out of range"))
+ raise oefmt(space.w_IndexError, "list assignment index out of range")
w_list.setitem(index, w_item)
return 0
@@ -66,8 +65,7 @@
if not isinstance(w_list, W_ListObject):
PyErr_BadInternalCall(space)
if index < 0 or index >= w_list.length():
- raise OperationError(space.w_IndexError, space.wrap(
- "list index out of range"))
+ raise oefmt(space.w_IndexError, "list index out of range")
w_list.ensure_object_strategy() # make sure we can return a borrowed obj
# XXX ^^^ how does this interact with CPyListStrategy?
w_res = w_list.getitem(index)
@@ -103,8 +101,7 @@
len(list) on a list object.
"""
if not PyList_Check(space, ref):
- raise OperationError(space.w_TypeError,
- space.wrap("expected list object"))
+ raise oefmt(space.w_TypeError, "expected list object")
return PyList_GET_SIZE(space, ref)
@cpython_api([PyObject], PyObject)
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -73,8 +73,8 @@
flags = rffi.cast(lltype.Signed, self.ml.c_ml_flags)
flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST)
if space.is_true(w_kw) and not flags & METH_KEYWORDS:
- raise OperationError(space.w_TypeError, space.wrap(
- self.name + "() takes no keyword arguments"))
+ raise oefmt(space.w_TypeError,
+ "%s() takes no keyword arguments", self.name)
func = rffi.cast(PyCFunction, self.ml.c_ml_meth)
length = space.int_w(space.len(w_args))
@@ -84,8 +84,8 @@
elif flags & METH_NOARGS:
if length == 0:
return generic_cpy_call(space, func, w_self, None)
- raise OperationError(space.w_TypeError, space.wrap(
- self.name + "() takes no arguments"))
+ raise oefmt(space.w_TypeError,
+ "%s() takes no arguments", self.name)
elif flags & METH_O:
if length != 1:
raise oefmt(space.w_TypeError,
@@ -277,7 +277,8 @@
cfunction = space.interp_w(W_PyCFunctionObject, w_obj)
except OperationError as e:
if e.match(space, space.w_TypeError):
- raise oefmt(space.w_SystemError, "bad argument to internal function")
+ raise oefmt(space.w_SystemError,
+ "bad argument to internal function")
raise
return cfunction.ml.c_ml_meth
diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py
--- a/pypy/module/cpyext/modsupport.py
+++ b/pypy/module/cpyext/modsupport.py
@@ -8,7 +8,7 @@
PyMethodDef, PyDescr_NewClassMethod, PyStaticMethod_New)
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
from pypy.module.cpyext.state import State
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
PyModuleDef_BaseStruct = cpython_struct(
'PyModuleDef_Base',
@@ -74,16 +74,17 @@
if w_type is None:
if flags & METH_CLASS or flags & METH_STATIC:
- raise OperationError(space.w_ValueError,
- space.wrap("module functions cannot set METH_CLASS or METH_STATIC"))
+ raise oefmt(space.w_ValueError,
+ "module functions cannot set METH_CLASS or "
+ "METH_STATIC")
w_obj = space.wrap(W_PyCFunctionObject(space, method, w_self, w_name))
else:
if methodname in dict_w and not (flags & METH_COEXIST):
continue
if flags & METH_CLASS:
if flags & METH_STATIC:
- raise OperationError(space.w_ValueError,
- space.wrap("method cannot be both class and static"))
+ raise oefmt(space.w_ValueError,
+ "method cannot be both class and static")
w_obj = PyDescr_NewClassMethod(space, w_type, method)
elif flags & METH_STATIC:
w_func = PyCFunction_NewEx(space, method, None, None)
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -3,7 +3,7 @@
Numpy C-API for PyPy - S. H. Muller, 2013/07/26
"""
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL
from pypy.module.cpyext.api import PyObject
@@ -126,15 +126,16 @@
parameter is NULL.
"""
if requirements not in (0, ARRAY_DEFAULT):
- raise OperationError(space.w_NotImplementedError, space.wrap(
- '_PyArray_FromAny called with not-implemented requirements argument'))
+ raise oefmt(space.w_NotImplementedError,
+ "_PyArray_FromAny called with not-implemented "
+ "requirements argument")
w_array = array(space, w_obj, w_dtype=w_dtype, copy=False)
if min_depth !=0 and len(w_array.get_shape()) < min_depth:
- raise OperationError(space.w_ValueError, space.wrap(
- 'object of too small depth for desired array'))
+ raise oefmt(space.w_ValueError,
+ "object of too small depth for desired array")
elif max_depth !=0 and len(w_array.get_shape()) > max_depth:
- raise OperationError(space.w_ValueError, space.wrap(
- 'object of too deep for desired array'))
+ raise oefmt(space.w_ValueError,
+ "object of too deep for desired array")
elif w_array.is_scalar():
# since PyArray_DATA() fails on scalars, create a 1D array and set empty
# shape. So the following combination works for *reading* scalars:
@@ -153,25 +154,26 @@
dtype = get_dtype_cache(space).dtypes_by_num[typenum]
return dtype
except KeyError:
- raise OperationError(space.w_ValueError, space.wrap(
- 'PyArray_DescrFromType called with invalid dtype %d' % typenum))
+ raise oefmt(space.w_ValueError,
+ "PyArray_DescrFromType called with invalid dtype %d",
+ typenum)
@cpython_api([PyObject, Py_ssize_t, Py_ssize_t, Py_ssize_t], PyObject, header=HEADER)
def _PyArray_FromObject(space, w_obj, typenum, min_depth, max_depth):
try:
dtype = get_dtype_cache(space).dtypes_by_num[typenum]
except KeyError:
- raise OperationError(space.w_ValueError, space.wrap(
- '_PyArray_FromObject called with invalid dtype %d' % typenum))
+ raise oefmt(space.w_ValueError,
+ "_PyArray_FromObject called with invalid dtype %d",
+ typenum)
try:
return _PyArray_FromAny(space, w_obj, dtype, min_depth, max_depth,
0, NULL);
except OperationError as e:
if e.match(space, space.w_NotImplementedError):
errstr = space.str_w(e.get_w_value(space))
- errstr = '_PyArray_FromObject' + errstr[16:]
- raise OperationError(space.w_NotImplementedError, space.wrap(
- errstr))
+ raise oefmt(space.w_NotImplementedError,
+ "_PyArray_FromObject %s", errstr[16:])
raise
def get_shape_and_dtype(space, nd, dims, typenum):
@@ -214,8 +216,7 @@
rffi.VOIDP, Py_ssize_t, Py_ssize_t, PyObject], PyObject, header=HEADER)
def _PyArray_New(space, subtype, nd, dims, typenum, strides, data, itemsize, flags, obj):
if strides:
- raise OperationError(space.w_NotImplementedError,
- space.wrap("strides must be NULL"))
+ raise oefmt(space.w_NotImplementedError, "strides must be NULL")
order = CORDER if flags & ARRAY_C_CONTIGUOUS else FORTRANORDER
owning = True if flags & ARRAY_OWNDATA else False
diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py
--- a/pypy/module/cpyext/number.py
+++ b/pypy/module/cpyext/number.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, Py_ssize_t
from pypy.module.cpyext.pyobject import PyObject
from rpython.rtyper.lltypesystem import rffi, lltype
@@ -114,7 +114,8 @@
@cpython_api([PyObject, PyObject, PyObject], PyObject)
def PyNumber_InPlacePower(space, w_o1, w_o2, w_o3):
if not space.is_w(w_o3, space.w_None):
- raise OperationError(space.w_ValueError, space.wrap(
- "PyNumber_InPlacePower with non-None modulus is not supported"))
+ raise oefmt(space.w_ValueError,
+ "PyNumber_InPlacePower with non-None modulus is not "
+ "supported")
return space.inplace_pow(w_o1, w_o2)
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -10,7 +10,7 @@
from pypy.module.cpyext.typeobject import PyTypeObjectPtr
from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall
from pypy.objspace.std.typeobject import W_TypeObject
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
import pypy.module.__builtin__.operation as operation
@@ -379,17 +379,15 @@
try:
w_meth = space.getattr(w_obj, space.wrap('fileno'))
except OperationError:
- raise OperationError(
- space.w_TypeError, space.wrap(
- "argument must be an int, or have a fileno() method."))
+ raise oefmt(space.w_TypeError,
+ "argument must be an int, or have a fileno() method.")
else:
w_fd = space.call_function(w_meth)
fd = space.int_w(w_fd)
if fd < 0:
- raise OperationError(
- space.w_ValueError, space.wrap(
- "file descriptor cannot be a negative integer"))
+ raise oefmt(space.w_ValueError,
+ "file descriptor cannot be a negative integer")
return rffi.cast(rffi.INT_real, fd)
@@ -412,7 +410,7 @@
allowing a type to explicitly indicate to the interpreter that it is not
hashable.
"""
- raise OperationError(space.w_TypeError, space.wrap("unhashable type"))
+ raise oefmt(space.w_TypeError, "unhashable type")
@cpython_api([PyObject], PyObject)
def PyObject_Dir(space, w_o):
@@ -435,8 +433,8 @@
pb = pto.c_tp_as_buffer
if not (pb and pb.c_bf_getbuffer):
- raise OperationError(space.w_TypeError, space.wrap(
- "expected an object with the buffer interface"))
+ raise oefmt(space.w_TypeError,
+ "expected an object with the buffer interface")
with lltype.scoped_alloc(Py_buffer) as view:
ret = generic_cpy_call(
space, pb.c_bf_getbuffer,
@@ -488,9 +486,7 @@
provides a subset of CPython's behavior.
"""
if flags & PyBUF_WRITABLE and readonly:
- raise OperationError(
- space.w_ValueError, space.wrap(
- "Object is not writable"))
+ raise oefmt(space.w_ValueError, "Object is not writable")
view.c_buf = buf
view.c_len = length
view.c_obj = obj
diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py
--- a/pypy/module/cpyext/pyerrors.py
+++ b/pypy/module/cpyext/pyerrors.py
@@ -1,7 +1,7 @@
import os
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError, strerror as _strerror
+from pypy.interpreter.error import OperationError, oefmt, strerror as _strerror
from pypy.interpreter import pytraceback
from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, CONST_STRING
from pypy.module.exceptions.interp_exceptions import W_RuntimeWarning
@@ -106,12 +106,11 @@
argument. It is mostly for internal use. In CPython this function always
raises an exception and returns 0 in all cases, hence the (ab)use of the
error indicator."""
- raise OperationError(space.w_TypeError,
- space.wrap("bad argument type for built-in operation"))
+ raise oefmt(space.w_TypeError, "bad argument type for built-in operation")
@cpython_api([], lltype.Void)
def PyErr_BadInternalCall(space):
- raise OperationError(space.w_SystemError, space.wrap("Bad internal call!"))
+ raise oefmt(space.w_SystemError, "Bad internal call!")
@cpython_api([], PyObject, error=CANNOT_FAIL)
def PyErr_NoMemory(space):
diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py
--- a/pypy/module/cpyext/pystrtod.py
+++ b/pypy/module/cpyext/pystrtod.py
@@ -1,5 +1,5 @@
import errno
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import cpython_api, CONST_STRING
from pypy.module.cpyext.pyobject import PyObject
from rpython.rlib import rdtoa
@@ -63,9 +63,8 @@
endpos = (rffi.cast(rffi.LONG, endptr[0]) -
rffi.cast(rffi.LONG, s))
if endpos == 0 or (not user_endptr and not endptr[0][0] == '\0'):
- raise OperationError(
- space.w_ValueError,
- space.wrap('invalid input at position %s' % endpos))
+ raise oefmt(space.w_ValueError,
+ "invalid input at position %d", endpos)
err = rffi.cast(lltype.Signed, rposix._get_errno())
if err == errno.ERANGE:
rposix._set_errno(rffi.cast(rffi.INT, 0))
@@ -75,8 +74,7 @@
else:
return -rfloat.INFINITY
else:
- raise OperationError(w_overflow_exception,
- space.wrap('value too large'))
+ raise oefmt(w_overflow_exception, "value too large")
return result
finally:
if not user_endptr:
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -63,8 +63,9 @@
return w_obj.getitem(index)
elif isinstance(w_obj, tupleobject.W_TupleObject):
return w_obj.wrappeditems[index]
- raise OperationError(space.w_TypeError, space.wrap(
- 'PySequence_Fast_GET_ITEM called but object is not a list or sequence'))
+ raise oefmt(space.w_TypeError,
+ "PySequence_Fast_GET_ITEM called but object is not a list or "
+ "sequence")
@cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
def PySequence_Fast_GET_SIZE(space, w_obj):
@@ -77,8 +78,9 @@
return w_obj.length()
elif isinstance(w_obj, tupleobject.W_TupleObject):
return len(w_obj.wrappeditems)
- raise OperationError(space.w_TypeError, space.wrap(
- 'PySequence_Fast_GET_SIZE called but object is not a list or sequence'))
+ raise oefmt(space.w_TypeError,
+ "PySequence_Fast_GET_SIZE called but object is not a list or "
+ "sequence")
@cpython_api([PyObject], PyObjectP)
def PySequence_Fast_ITEMS(space, w_obj):
@@ -93,8 +95,9 @@
cpy_strategy = space.fromcache(CPyListStrategy)
if w_obj.strategy is cpy_strategy:
return w_obj.get_raw_items() # asserts it's a cpyext strategy
- raise OperationError(space.w_TypeError, space.wrap(
- 'PySequence_Fast_ITEMS called but object is not the result of PySequence_Fast'))
+ raise oefmt(space.w_TypeError,
+ "PySequence_Fast_ITEMS called but object is not the result of "
+ "PySequence_Fast")
@cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject)
def PySequence_GetSlice(space, w_obj, start, end):
@@ -227,8 +230,7 @@
return idx
idx += 1
- raise OperationError(space.w_ValueError, space.wrap(
- "sequence.index(x): x not in sequence"))
+ raise oefmt(space.w_ValueError, "sequence.index(x): x not in sequence")
class CPyListStrategy(ListStrategy):
erase, unerase = rerased.new_erasing_pair("empty")
@@ -263,8 +265,8 @@
def getslice(self, w_list, start, stop, step, length):
#storage = self.unerase(w_list.lstorage)
- raise OperationError(w_list.space.w_NotImplementedError, w_list.space.wrap(
- "settting a slice of a PySequence_Fast is not supported"))
+ raise oefmt(w_list.space.w_NotImplementedError,
+ "settting a slice of a PySequence_Fast is not supported")
def getitems(self, w_list):
# called when switching list strategy, so convert storage
diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py
--- a/pypy/module/cpyext/setobject.py
+++ b/pypy/module/cpyext/setobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL,
build_type_checkers)
@@ -85,8 +85,7 @@
len(anyset). Raises a PyExc_SystemError if anyset is not a set, frozenset,
or an instance of a subtype."""
if not PySet_Check(space, ref):
- raise OperationError(space.w_TypeError,
- space.wrap("expected set object"))
+ raise oefmt(space.w_TypeError, "expected set object")
return PySet_GET_SIZE(space, ref)
@cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -35,8 +35,8 @@
def check_num_args(space, w_ob, n):
from pypy.module.cpyext.tupleobject import PyTuple_CheckExact
if not PyTuple_CheckExact(space, w_ob):
- raise OperationError(space.w_SystemError,
- space.wrap("PyArg_UnpackTuple() argument list is not a tuple"))
+ raise oefmt(space.w_SystemError,
+ "PyArg_UnpackTuple() argument list is not a tuple")
if n == space.len_w(w_ob):
return
raise oefmt(space.w_TypeError,
@@ -46,8 +46,8 @@
def check_num_argsv(space, w_ob, low, high):
from pypy.module.cpyext.tupleobject import PyTuple_CheckExact
if not PyTuple_CheckExact(space, w_ob):
- raise OperationError(space.w_SystemError,
- space.wrap("PyArg_UnpackTuple() argument list is not a tuple"))
+ raise oefmt(space.w_SystemError,
+ "PyArg_UnpackTuple() argument list is not a tuple")
if low <=space.len_w(w_ob) <= high:
return
raise oefmt(space.w_TypeError,
@@ -183,9 +183,7 @@
if w_type is space.w_None:
w_type = None
if w_obj is None and w_type is None:
- raise OperationError(
- space.w_TypeError,
- space.wrap("__get__(None, None) is invalid"))
+ raise oefmt(space.w_TypeError, "__get__(None, None) is invalid")
return generic_cpy_call(space, func_target, w_self, w_obj, w_type)
def wrap_descr_set(space, w_self, w_args, func):
diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py
--- a/pypy/module/cpyext/state.py
+++ b/pypy/module/cpyext/state.py
@@ -1,6 +1,6 @@
from rpython.rlib.objectmodel import we_are_translated
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.executioncontext import AsyncAction
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.annlowlevel import llhelper
@@ -52,8 +52,9 @@
self.clear_exception()
raise operror
if always:
- raise OperationError(self.space.w_SystemError, self.space.wrap(
- "Function returned an error result without setting an exception"))
+ raise oefmt(self.space.w_SystemError,
+ "Function returned an error result without setting an "
+ "exception")
def build_api(self, space):
"""NOT_RPYTHON
diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py
--- a/pypy/module/cpyext/structmember.py
+++ b/pypy/module/cpyext/structmember.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.structmemberdefs import *
@@ -79,8 +79,7 @@
w_name = space.wrap(rffi.charp2str(w_member.c_name))
raise OperationError(space.w_AttributeError, w_name)
else:
- raise OperationError(space.w_SystemError,
- space.wrap("bad memberdescr type"))
+ raise oefmt(space.w_SystemError, "bad memberdescr type")
return w_result
@@ -94,16 +93,15 @@
if (flags & READONLY or
member_type in [T_STRING, T_STRING_INPLACE]):
- raise OperationError(space.w_TypeError,
- space.wrap("readonly attribute"))
+ raise oefmt(space.w_TypeError, "readonly attribute")
elif w_value is None:
if member_type == T_OBJECT_EX:
if not rffi.cast(PyObjectP, addr)[0]:
w_name = space.wrap(rffi.charp2str(w_member.c_name))
raise OperationError(space.w_AttributeError, w_name)
elif member_type != T_OBJECT:
- raise OperationError(space.w_TypeError,
- space.wrap("can't delete numeric/char attribute"))
+ raise oefmt(space.w_TypeError,
+ "can't delete numeric/char attribute")
for converter in integer_converters:
typ, lltyp, getter = converter
@@ -116,8 +114,7 @@
if member_type == T_CHAR:
str_value = space.str_w(w_value)
if len(str_value) != 1:
- raise OperationError(space.w_TypeError,
- space.wrap("string of length 1 expected"))
+ raise oefmt(space.w_TypeError, "string of length 1 expected")
array = rffi.cast(rffi.CCHARP, addr)
array[0] = str_value[0]
elif member_type in [T_OBJECT, T_OBJECT_EX]:
@@ -126,6 +123,5 @@
Py_DecRef(space, array[0])
array[0] = make_ref(space, w_value)
else:
- raise OperationError(space.w_SystemError,
- space.wrap("bad memberdescr type"))
+ raise oefmt(space.w_SystemError, "bad memberdescr type")
return 0
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py
--- a/pypy/module/cpyext/test/test_sequence.py
+++ b/pypy/module/cpyext/test/test_sequence.py
@@ -163,7 +163,7 @@
assert space.int_w(space.getitem(w_l, space.wrap(1))) == 2
assert space.int_w(space.getitem(w_l, space.wrap(0))) == 1
e = py.test.raises(OperationError, space.getitem, w_l, space.wrap(15))
- assert "list index out of range" in e.exconly()
+ assert "list index out of range" in e.value.errorstr(space)
assert space.int_w(space.getitem(w_l, space.wrap(-1))) == 4
space.setitem(w_l, space.wrap(1), space.wrap(13))
assert space.int_w(space.getitem(w_l, space.wrap(1))) == 13
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.debug import fatalerror_notb
from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL,
@@ -142,8 +142,7 @@
ref = rffi.cast(PyTupleObject, ref)
size = ref.c_ob_size
if index < 0 or index >= size:
- raise OperationError(space.w_IndexError,
- space.wrap("tuple assignment index out of range"))
+ raise oefmt(space.w_IndexError, "tuple assignment index out of range")
old_ref = ref.c_ob_item[index]
ref.c_ob_item[index] = py_obj # consumes a reference
if old_ref:
@@ -158,8 +157,7 @@
ref = rffi.cast(PyTupleObject, ref)
size = ref.c_ob_size
if index < 0 or index >= size:
- raise OperationError(space.w_IndexError,
- space.wrap("tuple index out of range"))
+ raise oefmt(space.w_IndexError, "tuple index out of range")
return ref.c_ob_item[index] # borrowed ref
@cpython_api([PyObject], Py_ssize_t, error=-1)
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -7,7 +7,7 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.baseobjspace import W_Root, DescrMismatch
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.typedef import (GetSetProperty, TypeDef,
interp_attrproperty, interp_attrproperty, interp2app)
from pypy.module.__builtin__.abstractinst import abstract_issubclass_w
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.unicodedata import unicodedb
from pypy.module.cpyext.api import (
@@ -234,8 +234,7 @@
# Don't use PyUnicode_Check, it will realize the object :-(
w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type))
if not space.is_true(space.issubtype(w_type, space.w_unicode)):
- raise OperationError(space.w_TypeError,
- space.wrap("expected unicode object"))
+ raise oefmt(space.w_TypeError, "expected unicode object")
return PyUnicode_AS_UNICODE(space, ref)
@cpython_api([PyObject], rffi.CCHARP)
@@ -323,8 +322,8 @@
codec."""
w_str = PyUnicode_AsEncodedObject(space, w_unicode, llencoding, llerrors)
if not PyBytes_Check(space, w_str):
- raise OperationError(space.w_TypeError, space.wrap(
- "encoder did not return a bytes object"))
+ raise oefmt(space.w_TypeError,
+ "encoder did not return a bytes object")
return w_str
@cpython_api([PyObject], PyObject)
@@ -402,8 +401,7 @@
All other objects, including Unicode objects, cause a TypeError to be
set."""
if not encoding:
- raise OperationError(space.w_TypeError,
- space.wrap("decoding Unicode is not supported"))
+ raise oefmt(space.w_TypeError, "decoding Unicode is not supported")
w_encoding = space.wrap(rffi.charp2str(encoding))
if errors:
w_errors = space.wrap(rffi.charp2str(errors))
@@ -422,8 +420,7 @@
raise
w_meth = None
if w_meth is None:
- raise OperationError(space.w_TypeError,
- space.wrap("decoding Unicode is not supported"))
+ raise oefmt(space.w_TypeError, "decoding Unicode is not supported")
return space.call_function(w_meth, w_encoding, w_errors)
@@ -561,8 +558,8 @@
# XXX always create a new string so far
py_uni = rffi.cast(PyUnicodeObject, ref[0])
if not py_uni.c_buffer:
- raise OperationError(space.w_SystemError, space.wrap(
- "PyUnicode_Resize called on already created string"))
+ raise oefmt(space.w_SystemError,
+ "PyUnicode_Resize called on already created string")
try:
py_newuni = new_empty_unicode(space, newsize)
except MemoryError:
diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py
--- a/pypy/module/exceptions/interp_exceptions.py
+++ b/pypy/module/exceptions/interp_exceptions.py
@@ -215,7 +215,8 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError, space.wrap("setting exceptions's dictionary to a non-dict"))
+ raise oefmt(space.w_TypeError,
+ "setting exceptions's dictionary to a non-dict")
self.w_dict = w_dict
def descr_reduce(self, space):
diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py
--- a/pypy/module/fcntl/interp_fcntl.py
+++ b/pypy/module/fcntl/interp_fcntl.py
@@ -1,6 +1,6 @@
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.interpreter.error import OperationError, wrap_oserror, oefmt
+from pypy.interpreter.error import OperationError, oefmt, wrap_oserror
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from rpython.rlib import rposix
from rpython.translator.tool.cbuild import ExternalCompilationInfo
@@ -174,8 +174,7 @@
elif op & LOCK_EX:
l_type = F_WRLCK
else:
- raise OperationError(space.w_ValueError,
- space.wrap("unrecognized lock operation"))
+ raise oefmt(space.w_ValueError, "unrecognized lock operation")
op = [F_SETLKW, F_SETLK][int(bool(op & LOCK_NB))]
op = rffi.cast(rffi.INT, op) # C long => C int
@@ -231,9 +230,9 @@
lltype.free(ll_arg, flavor='raw')
if mutate_flag != -1:
- raise OperationError(space.w_TypeError, space.wrap(
- "ioctl requires a file or file descriptor, an integer "
- "and optionally an integer or buffer argument"))
+ raise oefmt(space.w_TypeError,
+ "ioctl requires a file or file descriptor, an integer and "
+ "optionally an integer or buffer argument")
try:
arg = space.getarg_w('s#', w_arg)
diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py
--- a/pypy/module/gc/interp_gc.py
+++ b/pypy/module/gc/interp_gc.py
@@ -1,5 +1,5 @@
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rlib import rgc
@@ -39,8 +39,7 @@
def enable_finalizers(space):
if space.user_del_action.finalizers_lock_count == 0:
- raise OperationError(space.w_ValueError,
- space.wrap("finalizers are already enabled"))
+ raise oefmt(space.w_ValueError, "finalizers are already enabled")
space.user_del_action.finalizers_lock_count -= 1
space.user_del_action.fire()
@@ -53,8 +52,7 @@
def dump_heap_stats(space, filename):
tb = rgc._heap_stats()
if not tb:
- raise OperationError(space.w_RuntimeError,
- space.wrap("Wrong GC"))
+ raise oefmt(space.w_RuntimeError, "Wrong GC")
f = open(filename, mode="w")
for i in range(len(tb)):
f.write("%d %d " % (tb[i].count, tb[i].size))
diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py
--- a/pypy/module/gc/referents.py
+++ b/pypy/module/gc/referents.py
@@ -2,7 +2,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import wrap_oserror, OperationError
+from pypy.interpreter.error import oefmt, wrap_oserror
from rpython.rlib.objectmodel import we_are_translated
@@ -41,8 +41,8 @@
return gcref
def missing_operation(space):
- return OperationError(space.w_NotImplementedError,
- space.wrap("operation not implemented by this GC"))
+ return oefmt(space.w_NotImplementedError,
+ "operation not implemented by this GC")
# ____________________________________________________________
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -160,8 +160,7 @@
if self.lock is None: # CannotHaveLock occurred
return
space = self.space
- raise OperationError(space.w_RuntimeError,
- space.wrap("not holding the import lock"))
+ raise oefmt(space.w_RuntimeError, "not holding the import lock")
assert self.lockcounter > 0
self.lockcounter -= 1
if self.lockcounter == 0:
diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py
--- a/pypy/module/imp/interp_imp.py
+++ b/pypy/module/imp/interp_imp.py
@@ -1,7 +1,7 @@
from pypy.module.imp import importing
from rpython.rlib import streamio
from rpython.rlib.streamio import StreamErrors
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from pypy.interpreter.module import Module
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.pycode import PyCode
@@ -53,8 +53,7 @@
@unwrap_spec(filename='fsencode')
def load_dynamic(space, w_modulename, filename, w_file=None):
if not importing.has_so_extension(space):
- raise OperationError(space.w_ImportError, space.wrap(
- "Not implemented"))
+ raise oefmt(space.w_ImportError, "Not implemented")
# the next line is mandatory to init cpyext
space.getbuiltinmodule("cpyext")
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
--- a/pypy/module/itertools/interp_itertools.py
+++ b/pypy/module/itertools/interp_itertools.py
@@ -47,8 +47,7 @@
def check_number(space, w_obj):
if (space.lookup(w_obj, '__int__') is None and
space.lookup(w_obj, '__float__') is None):
- raise OperationError(space.w_TypeError,
- space.wrap("expected a number"))
+ raise oefmt(space.w_TypeError, "expected a number")
@unwrap_spec(w_start=WrappedDefault(0), w_step=WrappedDefault(1))
def W_Count___new__(space, w_subtype, w_start, w_step):
@@ -333,7 +332,9 @@
"Indicies for islice() must be None or non-negative integers")
w_stop = args_w[0]
else:
- raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)"))
+ raise oefmt(space.w_TypeError,
+ "islice() takes at most 4 arguments (%d given)",
+ num_args)
if space.is_w(w_stop, space.w_None):
stop = -1
@@ -630,8 +631,8 @@
w_fillvalue = kwds_w["fillvalue"]
del kwds_w["fillvalue"]
if kwds_w:
- raise OperationError(space.w_TypeError, space.wrap(
- "zip_longest() got unexpected keyword argument(s)"))
+ raise oefmt(space.w_TypeError,
+ "zip_longest() got unexpected keyword argument(s)")
self = space.allocate_instance(W_ZipLongest, w_subtype)
self.__init__(space, space.w_None, arguments_w)
@@ -817,7 +818,7 @@
return tuple([gen(it.next) for i in range(n)])
"""
if n < 0:
- raise OperationError(space.w_ValueError, space.wrap("n must be >= 0"))
+ raise oefmt(space.w_ValueError, "n must be >= 0")
if isinstance(w_iterable, W_TeeIterable): # optimization only
w_chained_list = w_iterable.w_chained_list
@@ -1307,8 +1308,8 @@
w_repeat = kwds_w['repeat']
del kwds_w['repeat']
if kwds_w:
- raise OperationError(space.w_TypeError, space.wrap(
- "product() got unexpected keyword argument(s)"))
+ raise oefmt(space.w_TypeError,
+ "product() got unexpected keyword argument(s)")
r = space.allocate_instance(W_Product, w_subtype)
r.__init__(space, arguments_w, w_repeat)
@@ -1447,9 +1448,7 @@
def W_Combinations__new__(space, w_subtype, w_iterable, r):
pool_w = space.fixedview(w_iterable)
if r < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("r must be non-negative")
- )
+ raise oefmt(space.w_ValueError, "r must be non-negative")
indices = range(r)
res = space.allocate_instance(W_Combinations, w_subtype)
res.__init__(space, pool_w, indices, r)
@@ -1518,8 +1517,7 @@
def W_CombinationsWithReplacement__new__(space, w_subtype, w_iterable, r):
pool_w = space.fixedview(w_iterable)
if r < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("r must be non-negative"))
+ raise oefmt(space.w_ValueError, "r must be non-negative")
indices = [0] * r
res = space.allocate_instance(W_CombinationsWithReplacement, w_subtype)
res.__init__(space, pool_w, indices, r)
diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py
--- a/pypy/module/marshal/interp_marshal.py
+++ b/pypy/module/marshal/interp_marshal.py
@@ -57,8 +57,7 @@
def raise_eof(self):
space = self.space
- raise OperationError(space.w_EOFError, space.wrap(
- 'EOF read where object expected'))
+ raise oefmt(space.w_EOFError, "EOF read where object expected")
def finished(self):
pass
@@ -78,8 +77,8 @@
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- 'marshal.dump() 2nd arg must be file-like object'))
+ raise oefmt(space.w_TypeError,
+ "marshal.dump() 2nd arg must be file-like object")
def write(self, data):
space = self.space
@@ -95,8 +94,8 @@
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- 'marshal.load() arg must be file-like object'))
+ raise oefmt(space.w_TypeError,
+ "marshal.load() arg must be file-like object")
def read(self, n):
space = self.space
@@ -424,8 +423,7 @@
tc = self.get1()
w_ret = self._dispatch[ord(tc)](space, self, tc)
if w_ret is None and not allow_null:
- raise OperationError(space.w_TypeError, space.wrap(
- 'NULL object in marshal data'))
+ raise oefmt(space.w_TypeError, "NULL object in marshal data")
return w_ret
def load_w_obj(self):
@@ -450,8 +448,7 @@
res_w[idx] = w_ret
idx += 1
if w_ret is None:
- raise OperationError(space.w_TypeError, space.wrap(
- 'NULL object in marshal data'))
+ raise oefmt(space.w_TypeError, "NULL object in marshal data")
return res_w
def get_list_w(self):
@@ -471,8 +468,7 @@
def raise_eof(self):
space = self.space
- raise OperationError(space.w_EOFError, space.wrap(
- 'EOF read where object expected'))
+ raise oefmt(space.w_EOFError, "EOF read where object expected")
def get(self, n):
pos = self.bufpos
diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py
--- a/pypy/module/math/interp_math.py
+++ b/pypy/module/math/interp_math.py
@@ -2,7 +2,7 @@
import sys
from rpython.rlib import rfloat
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
class State:
def __init__(self, space):
@@ -22,11 +22,9 @@
try:
y = f(x)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("math range error"))
+ raise oefmt(space.w_OverflowError, "math range error")
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("math domain error"))
+ raise oefmt(space.w_ValueError, "math domain error")
return space.wrap(y)
math1._annspecialcase_ = 'specialize:arg(1)'
@@ -35,11 +33,9 @@
try:
r = f(x)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("math range error"))
+ raise oefmt(space.w_OverflowError, "math range error")
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("math domain error"))
+ raise oefmt(space.w_ValueError, "math domain error")
return r
math1_w._annspecialcase_ = 'specialize:arg(1)'
@@ -49,11 +45,9 @@
try:
r = f(x, snd)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("math range error"))
+ raise oefmt(space.w_OverflowError, "math range error")
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("math domain error"))
+ raise oefmt(space.w_ValueError, "math domain error")
return space.wrap(r)
math2._annspecialcase_ = 'specialize:arg(1)'
@@ -114,16 +108,13 @@
else:
exp = sys.maxint
else:
- raise OperationError(space.w_TypeError,
- space.wrap("integer required for second argument"))
+ raise oefmt(space.w_TypeError, "integer required for second argument")
try:
r = math.ldexp(x, exp)
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap("math range error"))
+ raise oefmt(space.w_OverflowError, "math range error")
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap("math domain error"))
+ raise oefmt(space.w_ValueError, "math domain error")
return space.wrap(r)
def hypot(space, w_x, w_y):
@@ -210,11 +201,9 @@
den = math.log(base)
result /= den
except OverflowError:
- raise OperationError(space.w_OverflowError,
- space.wrap('math range error'))
+ raise oefmt(space.w_OverflowError, "math range error")
except ValueError:
- raise OperationError(space.w_ValueError,
- space.wrap('math domain error'))
+ raise oefmt(space.w_ValueError, "math domain error")
return space.wrap(result)
def log(space, w_x, w_base=None):
@@ -359,8 +348,7 @@
if v != 0.0:
if not rfloat.isfinite(v):
if rfloat.isfinite(original):
- raise OperationError(space.w_OverflowError,
- space.wrap("intermediate overflow"))
+ raise oefmt(space.w_OverflowError, "intermediate overflow")
if rfloat.isinf(original):
inf_sum += original
special_sum += original
@@ -369,7 +357,7 @@
partials.append(v)
if special_sum != 0.0:
if rfloat.isnan(inf_sum):
- raise OperationError(space.w_ValueError, space.wrap("-inf + inf"))
+ raise oefmt(space.w_ValueError, "-inf + inf")
return space.wrap(special_sum)
hi = 0.0
if partials:
diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py
--- a/pypy/module/micronumpy/arrayops.py
+++ b/pypy/module/micronumpy/arrayops.py
@@ -73,11 +73,11 @@
if space.is_none(w_x):
arr = convert_to_array(space, w_arr)
return arr.descr_nonzero(space)
- raise OperationError(space.w_ValueError, space.wrap(
- "Where should be called with either 1 or 3 arguments"))
+ raise oefmt(space.w_ValueError,
+ "Where should be called with either 1 or 3 arguments")
if space.is_none(w_x):
- raise OperationError(space.w_ValueError, space.wrap(
- "Where should be called with either 1 or 3 arguments"))
+ raise oefmt(space.w_ValueError,
+ "Where should be called with either 1 or 3 arguments")
arr = convert_to_array(space, w_arr)
x = convert_to_array(space, w_x)
y = convert_to_array(space, w_y)
@@ -129,15 +129,16 @@
orig_axis, ndim)
for arr in args_w[1:]:
if len(arr.get_shape()) != ndim:
- raise OperationError(space.w_ValueError, space.wrap(
- "all the input arrays must have same number of dimensions"))
+ raise oefmt(space.w_ValueError,
+ "all the input arrays must have same number of "
+ "dimensions")
for i, axis_size in enumerate(arr.get_shape()):
if i == axis:
shape[i] += axis_size
elif axis_size != shape[i]:
- raise OperationError(space.w_ValueError, space.wrap(
- "all the input array dimensions except for the "
- "concatenation axis must match exactly"))
+ raise oefmt(space.w_ValueError,
+ "all the input array dimensions except for the "
+ "concatenation axis must match exactly")
dtype = find_result_type(space, args_w, [])
# concatenate does not handle ndarray subtypes, it always returns a ndarray
@@ -195,8 +196,7 @@
if space.is_none(w_out):
w_out = None
elif not isinstance(w_out, W_NDimArray):
- raise OperationError(space.w_TypeError, space.wrap(
- "return arrays must be of ArrayType"))
+ raise oefmt(space.w_TypeError, "return arrays must be of ArrayType")
shape = shape_agreement_multiple(space, choices + [w_out])
out = descriptor.dtype_agreement(space, choices, shape, w_out)
dtype = out.get_dtype()
diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py
--- a/pypy/module/micronumpy/base.py
+++ b/pypy/module/micronumpy/base.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from rpython.tool.pairtype import extendabletype
from rpython.rlib.rarithmetic import ovfcheck
from pypy.module.micronumpy import support
@@ -76,8 +76,8 @@
raise oefmt(space.w_ValueError, "array is too big.")
if storage_bytes > 0 :
if totalsize > storage_bytes:
- raise OperationError(space.w_TypeError, space.wrap(
- "buffer is too small for requested array"))
+ raise oefmt(space.w_TypeError,
+ "buffer is too small for requested array")
else:
storage_bytes = totalsize
if strides is None:
@@ -97,8 +97,8 @@
backstrides = calc_backstrides(strides, shape)
if w_base is not None:
if owning:
- raise OperationError(space.w_ValueError,
- space.wrap("Cannot have owning=True when specifying a buffer"))
+ raise oefmt(space.w_ValueError,
+ "Cannot have owning=True when specifying a buffer")
if writable:
impl = concrete.ConcreteArrayWithBase(shape, dtype, order,
strides, backstrides, storage, w_base,
diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py
--- a/pypy/module/micronumpy/boxes.py
+++ b/pypy/module/micronumpy/boxes.py
@@ -187,8 +187,7 @@
elif (space.isinstance_w(w_item, space.w_tuple) and
space.len_w(w_item) == 0):
return self
- raise OperationError(space.w_IndexError, space.wrap(
- "invalid index to scalar variable"))
+ raise oefmt(space.w_IndexError, "invalid index to scalar variable")
def descr_iter(self, space):
# Making numpy scalar non-iterable with a valid __getitem__ method
@@ -337,8 +336,7 @@
@unwrap_spec(decimals=int)
def descr_round(self, space, decimals=0, w_out=None):
if not space.is_none(w_out):
- raise OperationError(space.w_NotImplementedError, space.wrap(
- "out not supported"))
+ raise oefmt(space.w_NotImplementedError, "out not supported")
return self.get_dtype(space).itemtype.round(self, decimals)
def descr_astype(self, space, w_dtype):
@@ -363,14 +361,13 @@
dtype = space.interp_w(W_Dtype,
space.call_function(space.gettypefor(W_Dtype), w_dtype))
if dtype.elsize == 0:
- raise OperationError(space.w_TypeError, space.wrap(
- "data-type must not be 0-sized"))
+ raise oefmt(space.w_TypeError, "data-type must not be 0-sized")
if dtype.elsize != self.get_dtype(space).elsize:
- raise OperationError(space.w_ValueError, space.wrap(
- "new type not compatible with array."))
+ raise oefmt(space.w_ValueError,
+ "new type not compatible with array.")
if dtype.is_record():
- raise OperationError(space.w_NotImplementedError, space.wrap(
- "viewing scalar as record not implemented"))
+ raise oefmt(space.w_NotImplementedError,
+ "viewing scalar as record not implemented")
else:
return dtype.runpack_str(space, self.raw_str())
diff --git a/pypy/module/micronumpy/casting.py b/pypy/module/micronumpy/casting.py
--- a/pypy/module/micronumpy/casting.py
+++ b/pypy/module/micronumpy/casting.py
@@ -3,7 +3,7 @@
from rpython.rlib import jit
from rpython.rlib.signature import signature, types as ann
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import oefmt, OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.module.micronumpy.base import W_NDimArray, convert_to_array
from pypy.module.micronumpy import constants as NPY
diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
--- a/pypy/module/micronumpy/compile.py
+++ b/pypy/module/micronumpy/compile.py
@@ -5,7 +5,7 @@
import py
from pypy.interpreter import special
from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root, ObjSpace
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from rpython.rlib.objectmodel import specialize, instantiate
from rpython.rlib.nonconst import NonConstant
from rpython.rlib.rarithmetic import base_int
@@ -244,7 +244,7 @@
try:
return w_dict[index]
except KeyError as e:
- raise OperationError(self.w_KeyError, self.wrap("key error"))
+ raise oefmt(self.w_KeyError, "key error")
assert isinstance(obj, ListObject)
assert isinstance(index, IntObject)
@@ -275,7 +275,7 @@
elif isinstance(w_obj, FloatObject):
return int(w_obj.floatval)
elif isinstance(w_obj, SliceObject):
- raise OperationError(self.w_TypeError, self.wrap("slice."))
+ raise oefmt(self.w_TypeError, "slice.")
raise NotImplementedError
def unpackcomplex(self, w_obj):
@@ -462,7 +462,7 @@
def next(self):
space = self.space
if self.i >= len(self.items):
- raise OperationError(space.w_StopIteration, space.wrap("stop iteration"))
+ raise oefmt(space.w_StopIteration, "stop iteration")
self.i += 1
return self.items[self.i-1][0]
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from rpython.rlib import jit, rgc
from rpython.rlib.rarithmetic import ovfcheck
from rpython.rlib.listsort import make_timsort_class
@@ -251,8 +251,9 @@
w_idx = w_idx.get_scalar_value().item(space)
if not space.isinstance_w(w_idx, space.w_int) and \
not space.isinstance_w(w_idx, space.w_bool):
- raise OperationError(space.w_IndexError, space.wrap(
- "arrays used as indices must be of integer (or boolean) type"))
+ raise oefmt(space.w_IndexError,
+ "arrays used as indices must be of integer (or "
+ "boolean) type")
return [IntegerChunk(w_idx), EllipsisChunk()]
elif space.is_w(w_idx, space.w_None):
return [NewAxisChunk(), EllipsisChunk()]
@@ -564,8 +565,7 @@
self.flags &= ~ NPY.ARRAY_WRITEABLE
def descr_setitem(self, space, orig_array, w_index, w_value):
- raise OperationError(space.w_ValueError, space.wrap(
- "assignment destination is read-only"))
+ raise oefmt(space.w_ValueError, "assignment destination is read-only")
class NonWritableArray(ConcreteArray):
@@ -576,8 +576,7 @@
self.flags &= ~ NPY.ARRAY_WRITEABLE
def descr_setitem(self, space, orig_array, w_index, w_value):
- raise OperationError(space.w_ValueError, space.wrap(
- "assignment destination is read-only"))
+ raise oefmt(space.w_ValueError, "assignment destination is read-only")
class SliceArray(BaseConcreteArray):
@@ -671,8 +670,7 @@
self.flags &= ~NPY.ARRAY_WRITEABLE
def descr_setitem(self, space, orig_array, w_index, w_value):
- raise OperationError(space.w_ValueError, space.wrap(
- "assignment destination is read-only"))
+ raise oefmt(space.w_ValueError, "assignment destination is read-only")
class VoidBoxStorage(BaseConcreteArray):
diff --git a/pypy/module/micronumpy/converters.py b/pypy/module/micronumpy/converters.py
--- a/pypy/module/micronumpy/converters.py
+++ b/pypy/module/micronumpy/converters.py
@@ -17,8 +17,8 @@
elif ch in ('s', 'S'):
endian = NPY.SWAP
else:
- raise OperationError(space.w_ValueError, space.wrap(
- "%s is an unrecognized byteorder" % new_order))
+ raise oefmt(space.w_ValueError,
+ "%s is an unrecognized byteorder", new_order)
return endian
@@ -37,8 +37,7 @@
mode = space.int_w(w_mode)
if NPY.CLIP <= mode <= NPY.RAISE:
return mode
- raise OperationError(space.w_TypeError,
- space.wrap("clipmode not understood"))
+ raise oefmt(space.w_TypeError, "clipmode not understood")
def searchside_converter(space, w_obj):
@@ -92,12 +91,11 @@
if axis < 0:
axis += ndim
if axis < 0 or axis >= ndim:
- raise OperationError(space.w_ValueError, space.wrap(
- "'axis' entry %d is out of bounds [-%d, %d)" %
- (item, ndim, ndim)))
+ raise oefmt(space.w_ValueError,
+ "'axis' entry %d is out of bounds [-%d, %d)",
+ item, ndim, ndim)
if out[axis]:
- raise OperationError(space.w_ValueError, space.wrap(
- "duplicate value in 'axis'"))
+ raise oefmt(space.w_ValueError, "duplicate value in 'axis'")
out[axis] = True
return out
diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
--- a/pypy/module/micronumpy/ctors.py
+++ b/pypy/module/micronumpy/ctors.py
@@ -266,16 +266,16 @@
if is_single_elem(space, batch[0], is_rec_type):
for w_elem in batch:
if not is_single_elem(space, w_elem, is_rec_type):
- raise OperationError(space.w_ValueError, space.wrap(
- "setting an array element with a sequence"))
+ raise oefmt(space.w_ValueError,
+ "setting an array element with a sequence")
return shape[:], batch
new_batch = []
size = space.len_w(batch[0])
for w_elem in batch:
if (is_single_elem(space, w_elem, is_rec_type) or
space.len_w(w_elem) != size):
- raise OperationError(space.w_ValueError, space.wrap(
- "setting an array element with a sequence"))
+ raise oefmt(space.w_ValueError,
+ "setting an array element with a sequence")
w_array = space.lookup(w_elem, '__array__')
if w_array is not None:
# Make sure we call the array implementation of listview,
@@ -327,8 +327,8 @@
shape = shape_converter(space, w_shape, dtype)
for dim in shape:
if dim < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "negative dimensions are not allowed"))
+ raise oefmt(space.w_ValueError,
+ "negative dimensions are not allowed")
try:
From pypy.commits at gmail.com Mon May 2 21:24:40 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 18:24:40 -0700 (PDT)
Subject: [pypy-commit] pypy stdlib-2.7.11: merge default
Message-ID: <5727fdd8.89cbc20a.ecfe2.3f55@mx.google.com>
Author: Philip Jenvey
Branch: stdlib-2.7.11
Changeset: r84162:07673190d34f
Date: 2016-05-02 18:23 -0700
http://bitbucket.org/pypy/pypy/changeset/07673190d34f/
Log: merge default
diff too long, truncating to 2000 out of 36490 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,3 +20,5 @@
5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -111,23 +111,24 @@
Simon Burton
Martin Matusiak
Konstantin Lopuhin
+ Stefano Rivera
Wenzhu Man
John Witulski
Laurence Tratt
Ivan Sichmann Freitas
Greg Price
Dario Bertini
- Stefano Rivera
Mark Pearse
Simon Cross
+ Edd Barrett
Andreas Stührk
- Edd Barrett
Jean-Philippe St. Pierre
Guido van Rossum
Pavel Vinogradov
+ Spenser Bauman
Jeremy Thurgood
Paweł Piotr Przeradowski
- Spenser Bauman
+ Tobias Pape
Paul deGrandis
Ilya Osadchiy
marky1991
@@ -139,7 +140,7 @@
Georg Brandl
Bert Freudenberg
Stian Andreassen
- Tobias Pape
+ Mark Young
Wanja Saatkamp
Gerald Klix
Mike Blume
@@ -170,9 +171,9 @@
Yichao Yu
Rocco Moretti
Gintautas Miliauskas
+ Devin Jeanpierre
Michael Twomey
Lucian Branescu Mihaila
- Devin Jeanpierre
Gabriel Lavoie
Olivier Dormond
Jared Grubb
@@ -183,6 +184,7 @@
Victor Stinner
Andrews Medina
anatoly techtonik
+ Sergey Matyunin
Stuart Williams
Jasper Schulz
Christian Hudon
@@ -217,7 +219,6 @@
Arjun Naik
Valentina Mukhamedzhanova
Stefano Parmesan
- Mark Young
Alexis Daboville
Jens-Uwe Mager
Carl Meyer
@@ -225,7 +226,9 @@
Pieter Zieschang
Gabriel
Lukas Vacek
+ Kunal Grover
Andrew Dalke
+ Florin Papa
Sylvain Thenault
Jakub Stasiak
Nathan Taylor
@@ -240,7 +243,6 @@
Kristjan Valur Jonsson
David Lievens
Neil Blakey-Milner
- Sergey Matyunin
Lutz Paelike
Lucio Torre
Lars Wassermann
@@ -252,9 +254,11 @@
Artur Lisiecki
Sergey Kishchenko
Ignas Mikalajunas
+ Alecsandru Patrascu
Christoph Gerum
Martin Blais
Lene Wagner
+ Catalin Gabriel Manciu
Tomo Cocoa
Kim Jin Su
Toni Mattis
@@ -291,6 +295,7 @@
Akira Li
Gustavo Niemeyer
Stephan Busemann
+ florinpapa
Rafał Gałczyński
Matt Bogosian
Christian Muirhead
@@ -305,6 +310,7 @@
Boglarka Vezer
Chris Pressey
Buck Golemon
+ Diana Popa
Konrad Delong
Dinu Gherman
Chris Lambacher
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
+ 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after ` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create an integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -320,8 +320,7 @@
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
- def __hash__(self):
- raise TypeError("deque objects are unhashable")
+ __hash__ = None
def __copy__(self):
return self.__class__(self, self.maxlen)
diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py
--- a/lib_pypy/_pypy_wait.py
+++ b/lib_pypy/_pypy_wait.py
@@ -1,51 +1,22 @@
-from resource import _struct_rusage, struct_rusage
-from ctypes import CDLL, c_int, POINTER, byref
-from ctypes.util import find_library
+from resource import ffi, lib, _make_struct_rusage
__all__ = ["wait3", "wait4"]
-libc = CDLL(find_library("c"))
-c_wait3 = libc.wait3
-c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait3.restype = c_int
-
-c_wait4 = libc.wait4
-c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait4.restype = c_int
-
-def create_struct_rusage(c_struct):
- return struct_rusage((
- float(c_struct.ru_utime),
- float(c_struct.ru_stime),
- c_struct.ru_maxrss,
- c_struct.ru_ixrss,
- c_struct.ru_idrss,
- c_struct.ru_isrss,
- c_struct.ru_minflt,
- c_struct.ru_majflt,
- c_struct.ru_nswap,
- c_struct.ru_inblock,
- c_struct.ru_oublock,
- c_struct.ru_msgsnd,
- c_struct.ru_msgrcv,
- c_struct.ru_nsignals,
- c_struct.ru_nvcsw,
- c_struct.ru_nivcsw))
def wait3(options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait3(byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait3(status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
def wait4(pid, options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait4(pid, status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_resource_build.py
@@ -0,0 +1,118 @@
+from cffi import FFI
+
+ffi = FFI()
+
+# Note: we don't directly expose 'struct timeval' or 'struct rlimit'
+
+
+rlimit_consts = '''
+RLIMIT_CPU
+RLIMIT_FSIZE
+RLIMIT_DATA
+RLIMIT_STACK
+RLIMIT_CORE
+RLIMIT_NOFILE
+RLIMIT_OFILE
+RLIMIT_VMEM
+RLIMIT_AS
+RLIMIT_RSS
+RLIMIT_NPROC
+RLIMIT_MEMLOCK
+RLIMIT_SBSIZE
+RLIM_INFINITY
+RUSAGE_SELF
+RUSAGE_CHILDREN
+RUSAGE_BOTH
+'''.split()
+
+rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s)
+ for s in rlimit_consts]
+
+
+ffi.set_source("_resource_cffi", """
+#include
+#include
+#include
+#include
+
+static const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[] = {
+$RLIMIT_CONSTS
+ { NULL, 0 }
+};
+
+#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001)
+
+static double my_utime(struct rusage *input)
+{
+ return doubletime(input->ru_utime);
+}
+
+static double my_stime(struct rusage *input)
+{
+ return doubletime(input->ru_stime);
+}
+
+static int my_getrlimit(int resource, long long result[2])
+{
+ struct rlimit rl;
+ if (getrlimit(resource, &rl) == -1)
+ return -1;
+ result[0] = rl.rlim_cur;
+ result[1] = rl.rlim_max;
+ return 0;
+}
+
+static int my_setrlimit(int resource, long long cur, long long max)
+{
+ struct rlimit rl;
+ rl.rlim_cur = cur & RLIM_INFINITY;
+ rl.rlim_max = max & RLIM_INFINITY;
+ return setrlimit(resource, &rl);
+}
+
+""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts)))
+
+
+ffi.cdef("""
+
+#define RLIM_NLIMITS ...
+
+const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[];
+
+struct rusage {
+ long ru_maxrss;
+ long ru_ixrss;
+ long ru_idrss;
+ long ru_isrss;
+ long ru_minflt;
+ long ru_majflt;
+ long ru_nswap;
+ long ru_inblock;
+ long ru_oublock;
+ long ru_msgsnd;
+ long ru_msgrcv;
+ long ru_nsignals;
+ long ru_nvcsw;
+ long ru_nivcsw;
+ ...;
+};
+
+static double my_utime(struct rusage *);
+static double my_stime(struct rusage *);
+void getrusage(int who, struct rusage *result);
+int my_getrlimit(int resource, long long result[2]);
+int my_setrlimit(int resource, long long cur, long long max);
+
+int wait3(int *status, int options, struct rusage *rusage);
+int wait4(int pid, int *status, int options, struct rusage *rusage);
+""")
+
+
+if __name__ == "__main__":
+ ffi.compile()
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.5.2
+Version: 1.6.0
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.5.2"
-__version_info__ = (1, 5, 2)
+__version__ = "1.6.0"
+__version_info__ = (1, 6, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.5.2"
+ "\ncompiled with cffi version: 1.6.0"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -299,6 +299,23 @@
"""
return self._backend.string(cdata, maxlen)
+ def unpack(self, cdata, length):
+ """Unpack an array of C data of the given length,
+ returning a Python string/unicode/list.
+
+ If 'cdata' is a pointer to 'char', returns a byte string.
+ It does not stop at the first null. This is equivalent to:
+ ffi.buffer(cdata, length)[:]
+
+ If 'cdata' is a pointer to 'wchar_t', returns a unicode string.
+ 'length' is measured in wchar_t's; it is not the size in bytes.
+
+ If 'cdata' is a pointer to anything else, returns a list of
+ 'length' items. This is a faster equivalent to:
+ [cdata[i] for i in range(length)]
+ """
+ return self._backend.unpack(cdata, length)
+
def buffer(self, cdata, size=-1):
"""Return a read-write buffer object that references the raw C data
pointed to by the given 'cdata'. The 'cdata' must be a pointer or
@@ -721,6 +738,26 @@
raise ValueError("ffi.def_extern() is only available on API-mode FFI "
"objects")
+ def list_types(self):
+ """Returns the user type names known to this FFI instance.
+ This returns a tuple containing three lists of names:
+ (typedef_names, names_of_structs, names_of_unions)
+ """
+ typedefs = []
+ structs = []
+ unions = []
+ for key in self._parser._declarations:
+ if key.startswith('typedef '):
+ typedefs.append(key[8:])
+ elif key.startswith('struct '):
+ structs.append(key[7:])
+ elif key.startswith('union '):
+ unions.append(key[6:])
+ typedefs.sort()
+ structs.sort()
+ unions.sort()
+ return (typedefs, structs, unions)
+
def _load_backend_lib(backend, name, flags):
if name is None:
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -29,7 +29,8 @@
_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
_r_cdecl = re.compile(r"\b__cdecl\b")
-_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')
+_r_extern_python = re.compile(r'\bextern\s*"'
+ r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
_r_star_const_space = re.compile( # matches "* const "
r"[*]\s*((const|volatile|restrict)\b\s*)+")
@@ -88,6 +89,12 @@
# void __cffi_extern_python_start;
# int foo(int);
# void __cffi_extern_python_stop;
+ #
+ # input: `extern "Python+C" int foo(int);`
+ # output:
+ # void __cffi_extern_python_plus_c_start;
+ # int foo(int);
+ # void __cffi_extern_python_stop;
parts = []
while True:
match = _r_extern_python.search(csource)
@@ -98,7 +105,10 @@
#print ''.join(parts)+csource
#print '=>'
parts.append(csource[:match.start()])
- parts.append('void __cffi_extern_python_start; ')
+ if 'C' in match.group(1):
+ parts.append('void __cffi_extern_python_plus_c_start; ')
+ else:
+ parts.append('void __cffi_extern_python_start; ')
if csource[endpos] == '{':
# grouping variant
closing = csource.find('}', endpos)
@@ -302,7 +312,7 @@
break
#
try:
- self._inside_extern_python = False
+ self._inside_extern_python = '__cffi_extern_python_stop'
for decl in iterator:
if isinstance(decl, pycparser.c_ast.Decl):
self._parse_decl(decl)
@@ -376,8 +386,10 @@
tp = self._get_type_pointer(tp, quals)
if self._options.get('dllexport'):
tag = 'dllexport_python '
- elif self._inside_extern_python:
+ elif self._inside_extern_python == '__cffi_extern_python_start':
tag = 'extern_python '
+ elif self._inside_extern_python == '__cffi_extern_python_plus_c_start':
+ tag = 'extern_python_plus_c '
else:
tag = 'function '
self._declare(tag + decl.name, tp)
@@ -421,11 +433,9 @@
# hack: `extern "Python"` in the C source is replaced
# with "void __cffi_extern_python_start;" and
# "void __cffi_extern_python_stop;"
- self._inside_extern_python = not self._inside_extern_python
- assert self._inside_extern_python == (
- decl.name == '__cffi_extern_python_start')
+ self._inside_extern_python = decl.name
else:
- if self._inside_extern_python:
+ if self._inside_extern_python !='__cffi_extern_python_stop':
raise api.CDefError(
"cannot declare constants or "
"variables with 'extern \"Python\"'")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -1145,11 +1145,11 @@
def _generate_cpy_extern_python_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
self._do_collect_type(tp)
+ _generate_cpy_dllexport_python_collecttype = \
+ _generate_cpy_extern_python_plus_c_collecttype = \
+ _generate_cpy_extern_python_collecttype
- def _generate_cpy_dllexport_python_collecttype(self, tp, name):
- self._generate_cpy_extern_python_collecttype(tp, name)
-
- def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False):
+ def _extern_python_decl(self, tp, name, tag_and_space):
prnt = self._prnt
if isinstance(tp.result, model.VoidType):
size_of_result = '0'
@@ -1184,11 +1184,7 @@
size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
tp.result.get_c_name(''), size_of_a,
tp.result.get_c_name(''), size_of_a)
- if dllexport:
- tag = 'CFFI_DLLEXPORT'
- else:
- tag = 'static'
- prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments)))
+ prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
prnt('{')
prnt(' char a[%s];' % size_of_a)
prnt(' char *p = a;')
@@ -1206,8 +1202,14 @@
prnt()
self._num_externpy += 1
+ def _generate_cpy_extern_python_decl(self, tp, name):
+ self._extern_python_decl(tp, name, 'static ')
+
def _generate_cpy_dllexport_python_decl(self, tp, name):
- self._generate_cpy_extern_python_decl(tp, name, dllexport=True)
+ self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
+
+ def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
+ self._extern_python_decl(tp, name, '')
def _generate_cpy_extern_python_ctx(self, tp, name):
if self.target_is_python:
@@ -1220,8 +1222,9 @@
self._lsts["global"].append(
GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
- def _generate_cpy_dllexport_python_ctx(self, tp, name):
- self._generate_cpy_extern_python_ctx(tp, name)
+ _generate_cpy_dllexport_python_ctx = \
+ _generate_cpy_extern_python_plus_c_ctx = \
+ _generate_cpy_extern_python_ctx
def _string_literal(self, s):
def _char_repr(c):
@@ -1231,7 +1234,7 @@
if c == '\n': return '\\n'
return '\\%03o' % ord(c)
lines = []
- for line in s.splitlines(True):
+ for line in s.splitlines(True) or ['']:
lines.append('"%s"' % ''.join([_char_repr(c) for c in line]))
return ' \\\n'.join(lines)
@@ -1319,7 +1322,9 @@
s = s.encode('ascii')
super(NativeIO, self).write(s)
-def _make_c_or_py_source(ffi, module_name, preamble, target_file):
+def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose):
+ if verbose:
+ print("generating %s" % (target_file,))
recompiler = Recompiler(ffi, module_name,
target_is_python=(preamble is None))
recompiler.collect_type_table()
@@ -1331,6 +1336,8 @@
with open(target_file, 'r') as f1:
if f1.read(len(output) + 1) != output:
raise IOError
+ if verbose:
+ print("(already up-to-date)")
return False # already up-to-date
except IOError:
tmp_file = '%s.~%d' % (target_file, os.getpid())
@@ -1343,12 +1350,14 @@
os.rename(tmp_file, target_file)
return True
-def make_c_source(ffi, module_name, preamble, target_c_file):
+def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False):
assert preamble is not None
- return _make_c_or_py_source(ffi, module_name, preamble, target_c_file)
+ return _make_c_or_py_source(ffi, module_name, preamble, target_c_file,
+ verbose)
-def make_py_source(ffi, module_name, target_py_file):
- return _make_c_or_py_source(ffi, module_name, None, target_py_file)
+def make_py_source(ffi, module_name, target_py_file, verbose=False):
+ return _make_c_or_py_source(ffi, module_name, None, target_py_file,
+ verbose)
def _modname_to_file(outputdir, modname, extension):
parts = modname.split('.')
@@ -1438,7 +1447,8 @@
target = '*'
#
ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds)
- updated = make_c_source(ffi, module_name, preamble, c_file)
+ updated = make_c_source(ffi, module_name, preamble, c_file,
+ verbose=compiler_verbose)
if call_c_compiler:
patchlist = []
cwd = os.getcwd()
@@ -1458,7 +1468,8 @@
else:
if c_file is None:
c_file, _ = _modname_to_file(tmpdir, module_name, '.py')
- updated = make_py_source(ffi, module_name, c_file)
+ updated = make_py_source(ffi, module_name, c_file,
+ verbose=compiler_verbose)
if call_c_compiler:
return c_file
else:
@@ -1484,4 +1495,7 @@
def typeof_disabled(*args, **kwds):
raise NotImplementedError
ffi._typeof = typeof_disabled
+ for name in dir(ffi):
+ if not name.startswith('_') and not hasattr(module.ffi, name):
+ setattr(ffi, name, NotImplemented)
return module.lib
diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty
new file mode 100644
--- /dev/null
+++ b/lib_pypy/ctypes_config_cache/.empty
@@ -0,0 +1,1 @@
+dummy file to allow old buildbot configuration to run
diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py
deleted file mode 100644
diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/dumpcache.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import sys, os
-from ctypes_configure import dumpcache
-
-def dumpcache2(basename, config):
- size = 32 if sys.maxint <= 2**32 else 64
- filename = '_%s_%s_.py' % (basename, size)
- dumpcache.dumpcache(__file__, filename, config)
- #
- filename = os.path.join(os.path.dirname(__file__),
- '_%s_cache.py' % (basename,))
- g = open(filename, 'w')
- print >> g, '''\
-import sys
-_size = 32 if sys.maxint <= 2**32 else 64
-# XXX relative import, should be removed together with
-# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib
-_mod = __import__("_%s_%%s_" %% (_size,),
- globals(), locals(), ["*"])
-globals().update(_mod.__dict__)\
-''' % (basename,)
- g.close()
diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/locale.ctc.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-'ctypes_configure' source for _locale.py.
-Run this to rebuild _locale_cache.py.
-"""
-
-from ctypes_configure.configure import (configure, ExternalCompilationInfo,
- ConstantInteger, DefinedConstantInteger, SimpleType, check_eci)
-import dumpcache
-
-# ____________________________________________________________
-
-_CONSTANTS = [
- 'LC_CTYPE',
- 'LC_TIME',
- 'LC_COLLATE',
- 'LC_MONETARY',
- 'LC_MESSAGES',
- 'LC_NUMERIC',
- 'LC_ALL',
- 'CHAR_MAX',
-]
-
-class LocaleConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['limits.h',
- 'locale.h'])
-for key in _CONSTANTS:
- setattr(LocaleConfigure, key, DefinedConstantInteger(key))
-
-config = configure(LocaleConfigure, noerr=True)
-for key, value in config.items():
- if value is None:
- del config[key]
- _CONSTANTS.remove(key)
-
-# ____________________________________________________________
-
-eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h'])
-HAS_LANGINFO = check_eci(eci)
-
-if HAS_LANGINFO:
- # list of all possible names
- langinfo_names = [
- "RADIXCHAR", "THOUSEP", "CRNCYSTR",
- "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR",
- "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT",
- "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT",
- ]
- for i in range(1, 8):
- langinfo_names.append("DAY_%d" % i)
- langinfo_names.append("ABDAY_%d" % i)
- for i in range(1, 13):
- langinfo_names.append("MON_%d" % i)
- langinfo_names.append("ABMON_%d" % i)
-
- class LanginfoConfigure:
- _compilation_info_ = eci
- nl_item = SimpleType('nl_item')
- for key in langinfo_names:
- setattr(LanginfoConfigure, key, DefinedConstantInteger(key))
-
- langinfo_config = configure(LanginfoConfigure)
- for key, value in langinfo_config.items():
- if value is None:
- del langinfo_config[key]
- langinfo_names.remove(key)
- config.update(langinfo_config)
- _CONSTANTS += langinfo_names
-
-# ____________________________________________________________
-
-config['ALL_CONSTANTS'] = tuple(_CONSTANTS)
-config['HAS_LANGINFO'] = HAS_LANGINFO
-dumpcache.dumpcache2('locale', config)
diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py
deleted file mode 100755
--- a/lib_pypy/ctypes_config_cache/rebuild.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env python
-# Run this script to rebuild all caches from the *.ctc.py files.
-
-import os, sys
-
-sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))
-
-import py
-
-_dirpath = os.path.dirname(__file__) or os.curdir
-
-from rpython.tool.ansi_print import AnsiLogger
-log = AnsiLogger("ctypes_config_cache")
-
-
-def rebuild_one(name):
- filename = os.path.join(_dirpath, name)
- d = {'__file__': filename}
- path = sys.path[:]
- try:
- sys.path.insert(0, _dirpath)
- execfile(filename, d)
- finally:
- sys.path[:] = path
-
-def try_rebuild():
- size = 32 if sys.maxint <= 2**32 else 64
- # remove the files '_*_size_.py'
- left = {}
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_%s_.py' % size) or
- p.endswith('_%s_.pyc' % size)):
- os.unlink(os.path.join(_dirpath, p))
- elif p.startswith('_') and (p.endswith('_.py') or
- p.endswith('_.pyc')):
- for i in range(2, len(p)-4):
- left[p[:i]] = True
- # remove the files '_*_cache.py' if there is no '_*_*_.py' left around
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_cache.py') or
- p.endswith('_cache.pyc')):
- if p[:-9] not in left:
- os.unlink(os.path.join(_dirpath, p))
- #
- for p in os.listdir(_dirpath):
- if p.endswith('.ctc.py'):
- try:
- rebuild_one(p)
- except Exception, e:
- log.ERROR("Running %s:\n %s: %s" % (
- os.path.join(_dirpath, p),
- e.__class__.__name__, e))
-
-
-if __name__ == '__main__':
- try_rebuild()
diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/resource.ctc.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-'ctypes_configure' source for resource.py.
-Run this to rebuild _resource_cache.py.
-"""
-
-
-from ctypes import sizeof
-import dumpcache
-from ctypes_configure.configure import (configure,
- ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger,
- SimpleType)
-
-
-_CONSTANTS = (
- 'RLIM_INFINITY',
- 'RLIM_NLIMITS',
-)
-_OPTIONAL_CONSTANTS = (
- 'RLIMIT_CPU',
- 'RLIMIT_FSIZE',
- 'RLIMIT_DATA',
- 'RLIMIT_STACK',
- 'RLIMIT_CORE',
- 'RLIMIT_RSS',
- 'RLIMIT_NPROC',
- 'RLIMIT_NOFILE',
- 'RLIMIT_OFILE',
- 'RLIMIT_MEMLOCK',
- 'RLIMIT_AS',
- 'RLIMIT_LOCKS',
- 'RLIMIT_SIGPENDING',
- 'RLIMIT_MSGQUEUE',
- 'RLIMIT_NICE',
- 'RLIMIT_RTPRIO',
- 'RLIMIT_VMEM',
-
- 'RUSAGE_BOTH',
- 'RUSAGE_SELF',
- 'RUSAGE_CHILDREN',
-)
-
-# Setup our configure
-class ResourceConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h'])
- rlim_t = SimpleType('rlim_t')
-for key in _CONSTANTS:
- setattr(ResourceConfigure, key, ConstantInteger(key))
-for key in _OPTIONAL_CONSTANTS:
- setattr(ResourceConfigure, key, DefinedConstantInteger(key))
-
-# Configure constants and types
-config = configure(ResourceConfigure)
-config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1
-optional_constants = []
-for key in _OPTIONAL_CONSTANTS:
- if config[key] is not None:
- optional_constants.append(key)
- else:
- del config[key]
-
-config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants)
-dumpcache.dumpcache2('resource', config)
diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py
--- a/lib_pypy/pwd.py
+++ b/lib_pypy/pwd.py
@@ -1,4 +1,4 @@
-# ctypes implementation: Victor Stinner, 2008-05-08
+# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08
"""
This module provides access to the Unix password database.
It is available on all Unix versions.
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -1,15 +1,8 @@
-import sys
-if sys.platform == 'win32':
- raise ImportError('resource module not available for win32')
+"""http://docs.python.org/library/resource"""
-# load the platform-specific cache made by running resource.ctc.py
-from ctypes_config_cache._resource_cache import *
-
-from ctypes_support import standard_c_lib as libc
-from ctypes_support import get_errno
-from ctypes import Structure, c_int, c_long, byref, POINTER
+from _resource_cffi import ffi, lib
from errno import EINVAL, EPERM
-import _structseq
+import _structseq, os
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
@@ -18,106 +11,37 @@
class error(Exception):
pass
+class struct_rusage:
+ """struct_rusage: Result from getrusage.
-# Read required libc functions
-_getrusage = libc.getrusage
-_getrlimit = libc.getrlimit
-_setrlimit = libc.setrlimit
-try:
- _getpagesize = libc.getpagesize
- _getpagesize.argtypes = ()
- _getpagesize.restype = c_int
-except AttributeError:
- from os import sysconf
- _getpagesize = None
+This object may be accessed either as a tuple of
+ (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt,
+ nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw)
+or via the attributes ru_utime, ru_stime, ru_maxrss, and so on."""
-
-class timeval(Structure):
- _fields_ = (
- ("tv_sec", c_long),
- ("tv_usec", c_long),
- )
- def __str__(self):
- return "(%s, %s)" % (self.tv_sec, self.tv_usec)
-
- def __float__(self):
- return self.tv_sec + self.tv_usec/1000000.0
-
-class _struct_rusage(Structure):
- _fields_ = (
- ("ru_utime", timeval),
- ("ru_stime", timeval),
- ("ru_maxrss", c_long),
- ("ru_ixrss", c_long),
- ("ru_idrss", c_long),
- ("ru_isrss", c_long),
- ("ru_minflt", c_long),
- ("ru_majflt", c_long),
- ("ru_nswap", c_long),
- ("ru_inblock", c_long),
- ("ru_oublock", c_long),
- ("ru_msgsnd", c_long),
- ("ru_msgrcv", c_long),
- ("ru_nsignals", c_long),
- ("ru_nvcsw", c_long),
- ("ru_nivcsw", c_long),
- )
-
-_getrusage.argtypes = (c_int, POINTER(_struct_rusage))
-_getrusage.restype = c_int
-
-
-class struct_rusage:
__metaclass__ = _structseq.structseqtype
- ru_utime = _structseq.structseqfield(0)
- ru_stime = _structseq.structseqfield(1)
- ru_maxrss = _structseq.structseqfield(2)
- ru_ixrss = _structseq.structseqfield(3)
- ru_idrss = _structseq.structseqfield(4)
- ru_isrss = _structseq.structseqfield(5)
- ru_minflt = _structseq.structseqfield(6)
- ru_majflt = _structseq.structseqfield(7)
- ru_nswap = _structseq.structseqfield(8)
- ru_inblock = _structseq.structseqfield(9)
- ru_oublock = _structseq.structseqfield(10)
- ru_msgsnd = _structseq.structseqfield(11)
- ru_msgrcv = _structseq.structseqfield(12)
- ru_nsignals = _structseq.structseqfield(13)
- ru_nvcsw = _structseq.structseqfield(14)
- ru_nivcsw = _structseq.structseqfield(15)
+ ru_utime = _structseq.structseqfield(0, "user time used")
+ ru_stime = _structseq.structseqfield(1, "system time used")
+ ru_maxrss = _structseq.structseqfield(2, "max. resident set size")
+ ru_ixrss = _structseq.structseqfield(3, "shared memory size")
+ ru_idrss = _structseq.structseqfield(4, "unshared data size")
+ ru_isrss = _structseq.structseqfield(5, "unshared stack size")
+ ru_minflt = _structseq.structseqfield(6, "page faults not requiring I/O")
+ ru_majflt = _structseq.structseqfield(7, "page faults requiring I/O")
+ ru_nswap = _structseq.structseqfield(8, "number of swap outs")
+ ru_inblock = _structseq.structseqfield(9, "block input operations")
+ ru_oublock = _structseq.structseqfield(10, "block output operations")
+ ru_msgsnd = _structseq.structseqfield(11, "IPC messages sent")
+ ru_msgrcv = _structseq.structseqfield(12, "IPC messages received")
+ ru_nsignals = _structseq.structseqfield(13,"signals received")
+ ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches")
+ ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches")
- at builtinify
-def rlimit_check_bounds(rlim_cur, rlim_max):
- if rlim_cur > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_cur)
- if rlim_max > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_max)
-
-class rlimit(Structure):
- _fields_ = (
- ("rlim_cur", rlim_t),
- ("rlim_max", rlim_t),
- )
-
-_getrlimit.argtypes = (c_int, POINTER(rlimit))
-_getrlimit.restype = c_int
-_setrlimit.argtypes = (c_int, POINTER(rlimit))
-_setrlimit.restype = c_int
-
-
- at builtinify
-def getrusage(who):
- ru = _struct_rusage()
- ret = _getrusage(who, byref(ru))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- raise ValueError("invalid who parameter")
- raise error(errno)
+def _make_struct_rusage(ru):
return struct_rusage((
- float(ru.ru_utime),
- float(ru.ru_stime),
+ lib.my_utime(ru),
+ lib.my_stime(ru),
ru.ru_maxrss,
ru.ru_ixrss,
ru.ru_idrss,
@@ -135,48 +59,59 @@
))
@builtinify
+def getrusage(who):
+ ru = ffi.new("struct rusage *")
+ if lib.getrusage(who, ru) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("invalid who parameter")
+ raise error(ffi.errno)
+ return _make_struct_rusage(ru)
+
+ at builtinify
def getrlimit(resource):
- if not(0 <= resource < RLIM_NLIMITS):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlim = rlimit()
- ret = _getrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- raise error(errno)
- return (rlim.rlim_cur, rlim.rlim_max)
+ result = ffi.new("long long[2]")
+ if lib.my_getrlimit(resource, result) == -1:
+ raise error(ffi.errno)
+ return (result[0], result[1])
@builtinify
-def setrlimit(resource, rlim):
- if not(0 <= resource < RLIM_NLIMITS):
+def setrlimit(resource, limits):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlimit_check_bounds(*rlim)
- rlim = rlimit(rlim[0], rlim[1])
- ret = _setrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- return ValueError("current limit exceeds maximum limit")
- elif errno == EPERM:
- return ValueError("not allowed to raise maximum limit")
+ limits = tuple(limits)
+ if len(limits) != 2:
+ raise ValueError("expected a tuple of 2 integers")
+
+ if lib.my_setrlimit(resource, limits[0], limits[1]) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("current limit exceeds maximum limit")
+ elif ffi.errno == EPERM:
+ raise ValueError("not allowed to raise maximum limit")
else:
- raise error(errno)
+ raise error(ffi.errno)
+
@builtinify
def getpagesize():
- if _getpagesize:
- return _getpagesize()
- else:
- try:
- return sysconf("SC_PAGE_SIZE")
- except ValueError:
- # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE
- return sysconf("SC_PAGESIZE")
+ return os.sysconf("SC_PAGESIZE")
-__all__ = ALL_CONSTANTS + (
- 'error', 'timeval', 'struct_rusage', 'rlimit',
- 'getrusage', 'getrlimit', 'setrlimit', 'getpagesize',
+
+def _setup():
+ all_constants = []
+ p = lib.my_rlimit_consts
+ while p.name:
+ name = ffi.string(p.name)
+ globals()[name] = int(p.value)
+ all_constants.append(name)
+ p += 1
+ return all_constants
+
+__all__ = tuple(_setup()) + (
+ 'error', 'getpagesize', 'struct_rusage',
+ 'getrusage', 'getrlimit', 'setrlimit',
)
-
-del ALL_CONSTANTS
+del _setup
diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py
--- a/lib_pypy/syslog.py
+++ b/lib_pypy/syslog.py
@@ -51,6 +51,8 @@
# if log is not opened, open it now
if not _S_log_open:
openlog()
+ if isinstance(message, unicode):
+ message = str(message)
lib.syslog(priority, "%s", message)
@builtinify
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -204,15 +204,6 @@
BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
default=False),
- BoolOption("withprebuiltchar",
- "use prebuilt single-character string objects",
- default=False),
-
- BoolOption("sharesmallstr",
- "always reuse the prebuilt string objects "
- "(the empty string and potentially single-char strings)",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
@@ -222,39 +213,14 @@
default=False,
requires=[("objspace.honor__builtins__", False)]),
- BoolOption("withmapdict",
- "make instances really small but slow without the JIT",
- default=False,
- requires=[("objspace.std.getattributeshortcut", True),
- ("objspace.std.withtypeversion", True),
- ]),
-
- BoolOption("withrangelist",
- "enable special range list implementation that does not "
- "actually create the full list until the resulting "
- "list is mutated",
- default=False),
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
- BoolOption("withtypeversion",
- "version type objects when changing them",
- cmdline=None,
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
-
- BoolOption("withmethodcache",
- "try to cache method lookups",
- default=False,
- requires=[("objspace.std.withtypeversion", True),
- ("translation.rweakref", True)]),
BoolOption("withmethodcachecounter",
"try to cache methods and provide a counter in __pypy__. "
"for testing purposes only.",
- default=False,
- requires=[("objspace.std.withmethodcache", True)]),
+ default=False),
IntOption("methodcachesizeexp",
" 2 ** methodcachesizeexp is the size of the of the method cache ",
default=11),
@@ -265,22 +231,10 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("getattributeshortcut",
- "track types that override __getattribute__",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
BoolOption("newshortcut",
"cache and shortcut calling __new__ from builtin types",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
+ default=False),
- BoolOption("withidentitydict",
- "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
]),
])
@@ -296,15 +250,10 @@
"""
# all the good optimizations for PyPy should be listed here
if level in ['2', '3', 'jit']:
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withmethodcache=True)
- config.objspace.std.suggest(withprebuiltchar=True)
config.objspace.std.suggest(intshortcut=True)
config.objspace.std.suggest(optimized_list_getitem=True)
- config.objspace.std.suggest(getattributeshortcut=True)
#config.objspace.std.suggest(newshortcut=True)
config.objspace.std.suggest(withspecialisedtuple=True)
- config.objspace.std.suggest(withidentitydict=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
@@ -317,16 +266,13 @@
# memory-saving optimizations
if level == 'mem':
config.objspace.std.suggest(withprebuiltint=True)
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withprebuiltchar=True)
- config.objspace.std.suggest(withmapdict=True)
+ config.objspace.std.suggest(withliststrategies=True)
if not IS_64_BITS:
config.objspace.std.suggest(withsmalllong=True)
# extra optimizations with the JIT
if level == 'jit':
config.objspace.std.suggest(withcelldict=True)
- config.objspace.std.suggest(withmapdict=True)
def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
assert conf.objspace.usemodules.gc
- conf.objspace.std.withmapdict = True
- assert conf.objspace.std.withtypeversion
- conf = get_pypy_config()
- conf.objspace.std.withtypeversion = False
- py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
def test_conflicting_gcrootfinder():
conf = get_pypy_config()
conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
def test_set_pypy_opt_level():
conf = get_pypy_config()
set_pypy_opt_level(conf, '2')
- assert conf.objspace.std.getattributeshortcut
+ assert conf.objspace.std.intshortcut
conf = get_pypy_config()
set_pypy_opt_level(conf, '0')
- assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
- conf = get_pypy_config()
- conf.translation.rweakref = False
- set_pypy_opt_level(conf, '3')
-
- assert not conf.objspace.std.withtypeversion
- assert not conf.objspace.std.withmethodcache
+ assert not conf.objspace.std.intshortcut
def test_check_documentation():
def check_file_exists(fn):
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -102,15 +102,15 @@
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
- tk-dev
+ tk-dev libgc-dev
For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
On Fedora::
- yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
- lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
- (XXX plus the Febora version of libgdbm-dev and tk-dev)
+ dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+ gdbm-devel
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withrangelist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Enable "range list" objects. They are an additional implementation of the Python
-``list`` type, indistinguishable for the normal user. Whenever the ``range``
-builtin is called, an range list is returned. As long as this list is not
-mutated (and for example only iterated over), it uses only enough memory to
-store the start, stop and step of the range. This makes using ``range`` as
-efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
-
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -81,13 +81,13 @@
Simon Burton
Martin Matusiak
Konstantin Lopuhin
+ Stefano Rivera
Wenzhu Man
John Witulski
Laurence Tratt
Ivan Sichmann Freitas
Greg Price
Dario Bertini
- Stefano Rivera
Mark Pearse
Simon Cross
Andreas Stührk
@@ -95,9 +95,10 @@
Jean-Philippe St. Pierre
Guido van Rossum
Pavel Vinogradov
+ Spenser Bauman
Jeremy Thurgood
Paweł Piotr Przeradowski
- Spenser Bauman
+ Tobias Pape
Paul deGrandis
Ilya Osadchiy
marky1991
@@ -109,7 +110,7 @@
Georg Brandl
Bert Freudenberg
Stian Andreassen
- Tobias Pape
+ Mark Young
Wanja Saatkamp
Gerald Klix
Mike Blume
@@ -140,9 +141,9 @@
Yichao Yu
Rocco Moretti
Gintautas Miliauskas
+ Devin Jeanpierre
Michael Twomey
Lucian Branescu Mihaila
- Devin Jeanpierre
Gabriel Lavoie
Olivier Dormond
Jared Grubb
@@ -153,6 +154,7 @@
Victor Stinner
Andrews Medina
anatoly techtonik
+ Sergey Matyunin
Stuart Williams
Jasper Schulz
Christian Hudon
@@ -187,7 +189,6 @@
Arjun Naik
Valentina Mukhamedzhanova
Stefano Parmesan
- Mark Young
Alexis Daboville
Jens-Uwe Mager
Carl Meyer
@@ -195,7 +196,9 @@
Pieter Zieschang
Gabriel
Lukas Vacek
+ Kunal Grover
Andrew Dalke
+ Florin Papa
Sylvain Thenault
Jakub Stasiak
Nathan Taylor
@@ -210,7 +213,6 @@
Kristjan Valur Jonsson
David Lievens
Neil Blakey-Milner
- Sergey Matyunin
Lutz Paelike
Lucio Torre
Lars Wassermann
@@ -222,9 +224,11 @@
Artur Lisiecki
Sergey Kishchenko
Ignas Mikalajunas
+ Alecsandru Patrascu
Christoph Gerum
Martin Blais
Lene Wagner
+ Catalin Gabriel Manciu
Tomo Cocoa
Kim Jin Su
Toni Mattis
@@ -261,6 +265,7 @@
Akira Li
Gustavo Niemeyer
Stephan Busemann
+ florinpapa
Rafał Gałczyński
Matt Bogosian
Christian Muirhead
@@ -275,6 +280,7 @@
Boglarka Vezer
Chris Pressey
Buck Golemon
+ Diana Popa
Konrad Delong
Dinu Gherman
Chris Lambacher
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
The work on the cling backend has so far been done only for CPython, but
bringing it to PyPy is a lot less work than developing it in the first place.
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
.. _llvm: http://llvm.org/
.. _clang: http://clang.llvm.org/
@@ -283,7 +283,8 @@
core reflection set, but for the moment assume we want to have it in the
reflection library that we are building for this example.
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries")
which is a simple XML file specifying, either explicitly or by using a
pattern, which classes, variables, namespaces, etc. to select from the given
header file.
@@ -305,7 +306,7 @@
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
Now the reflection info can be generated and compiled::
@@ -811,7 +812,7 @@
immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
variable.
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
There are a couple of minor differences between PyCintex and cppyy, most to do
with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts `
+:source:`pypy/goal/` our main PyPy-translation scripts
live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
discussion/improve-rpython
discussion/ctypes-implementation
discussion/jit-profiler
+ discussion/rawrefcount
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -79,7 +79,7 @@
:doc:`Full details ` are `available here `.
.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
RPython Mixed Modules
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,20 +106,33 @@
For information on which third party extensions work (or do not work)
with PyPy see the `compatibility wiki`_.
+For more information about how we manage refcounting semantics see
+rawrefcount_.
+
.. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
.. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html
On which platforms does PyPy run?
---------------------------------
-PyPy is regularly and extensively tested on Linux machines. It mostly
+PyPy currently supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+PyPy is regularly and extensively tested on Linux machines. It
works on Mac and Windows: it is tested there, but most of us are running
-Linux so fixes may depend on 3rd-party contributions. PyPy's JIT
-works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7).
-Support for POWER (64-bit) is stalled at the moment.
+Linux so fixes may depend on 3rd-party contributions.
-To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or
+To bootstrap from sources, PyPy can use either CPython 2.7 or
another (e.g. older) PyPy. Cross-translation is not really supported:
e.g. to build a 32-bit PyPy, you need to have a 32-bit environment.
Cross-translation is only explicitly supported between a 32-bit Intel
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,8 @@
.. toctree::
+ release-5.1.1.rst
+ release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
release-4.0.1.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
.. toctree::
whatsnew-head.rst
+ whatsnew-5.1.0.rst
whatsnew-5.0.0.rst
whatsnew-4.0.1.rst
whatsnew-4.0.0.rst
diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
Dictionary Optimizations
~~~~~~~~~~~~~~~~~~~~~~~~
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
-Multi-dicts are a special implementation of dictionaries. It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime. Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that make it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled, by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
-This is now the default implementation of dictionaries in the Python interpreter.
+Identity Dicts
++++++++++++++++
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with
+new-style classes.
+
+
+Map Dicts
+++++++++++++
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually memory behaviour
+should be mostly like that of using ``__slots__``).
The idea is the following: Most instances of the same class have very similar
attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
dicts:
the representation of the instance dict contains only a list of values.
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
List Optimizations
@@ -114,8 +120,8 @@
created. This gives the memory and speed behaviour of ``xrange`` and the generality
of use of ``range``, and makes ``xrange`` essentially useless.
-You can enable this feature with the :config:`objspace.std.withrangelist`
-option.
+This feature is enabled by default as part of the
+:config:`objspace.std.withliststrategies` option.
User Class Optimizations
@@ -133,8 +139,7 @@
base classes is changed). On subsequent lookups the cached version can be used,
as long as the instance did not shadow any of its classes attributes.
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
Interpreter Optimizations
diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst
--- a/pypy/doc/introduction.rst
+++ b/pypy/doc/introduction.rst
@@ -1,16 +1,22 @@
What is PyPy?
=============
-In common parlance, PyPy has been used to mean two things. The first is the
-:ref:`RPython translation toolchain `, which is a framework for generating
-dynamic programming language implementations. And the second is one
-particular implementation that is so generated --
-an implementation of the Python_ programming language written in
-Python itself. It is designed to be flexible and easy to experiment with.
+Historically, PyPy has been used to mean two things. The first is the
+:ref:`RPython translation toolchain ` for generating
+interpreters for dynamic programming languages. And the second is one
+particular implementation of Python_ produced with it. Because RPython
+uses the same syntax as Python, this generated version became known as
+Python interpreter written in Python. It is designed to be flexible and
+easy to experiment with.
-This double usage has proven to be confusing, and we are trying to move
-away from using the word PyPy to mean both things. From now on we will
-try to use PyPy to only mean the Python implementation, and say the
+To make it more clear, we start with source code written in RPython,
+apply the RPython translation toolchain, and end up with PyPy as a
+binary executable. This executable is the Python interpreter.
+
+Double usage has proven to be confusing, so we've moved away from using
+the word PyPy to mean both toolchain and generated interpreter. Now we
+use the word PyPy to refer to the Python implementation, and explicitly
+mention
:ref:`RPython translation toolchain ` when we mean the framework.
Some older documents, presentations, papers and videos will still have the old
diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.0.rst
@@ -0,0 +1,160 @@
+========
+PyPy 5.1
+========
+
+We have released PyPy 5.1, about a month after PyPy 5.0.
+
+This release includes more improvement to warmup time and memory
+requirements. We have seen about a 20% memory requirement reduction and up to
+30% warmup time improvement, more detail in the `blog post`_.
+
+We also now have `fully support for the IBM s390x`_. Since this support is in
+`RPython`_, any dynamic language written using RPython, like PyPy, will
+automagically be supported on that architecture.
+
+We updated cffi_ to 1.6, and continue to improve support for the wider
+python ecosystem using the PyPy interpreter.
+
+You can download the PyPy 5.1 release here:
+
+ http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project.
+
+We would also like to thank our contributors and
+encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_
+with making RPython's JIT even better.
+
+.. _`PyPy`: http://doc.pypy.org
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
+.. _`numpy`: https://bitbucket.org/pypy/numpy
+.. _cffi: https://cffi.readthedocs.org
+.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html
+.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Other Highlights (since 5.0 released in March 2016)
+=========================================================
+
+* New features:
+
+ * A new jit backend for the IBM s390x, which was a large effort over the past
+ few months.
+
+ * Add better support for PyUnicodeObject in the C-API compatibility layer
+
+ * Support GNU/kFreeBSD Debian ports in vmprof
+
+ * Add __pypy__._promote
+
+ * Make attrgetter a single type for CPython compatibility
+
+* Bug Fixes
+
+ * Catch exceptions raised in an exit function
+
+ * Fix a corner case in the JIT
+
+ * Fix edge cases in the cpyext refcounting-compatible semantics
+ (more work on cpyext compatibility is coming in the ``cpyext-ext``
+ branch, but isn't ready yet)
+
+ * Try harder to not emit NEON instructions on ARM processors without NEON
+ support
+
+ * Improve the rpython posix module system interaction function calls
+
+ * Detect a missing class function implementation instead of calling a random
+ function
+
+ * Check that PyTupleObjects do not contain any NULLs at the
+ point of conversion to W_TupleObjects
+
+ * In ctypes, fix _anonymous_ fields of instances
+
+ * Fix JIT issue with unpack() on a Trace which contains half-written operations
+
+ * Fix sandbox startup (a regression in 5.0)
+
+ * Fix possible segfault for classes with mangled mro or __metaclass__
+
+ * Fix isinstance(deque(), Hashable) on the pure python deque
+
+ * Fix an issue with forkpty()
+
+ * Issues reported with our previous release were resolved_ after reports from users on
+ our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
+ #pypy
+
+* Numpy_:
+
+ * Implemented numpy.where for a single argument
+
+ * Indexing by a numpy scalar now returns a scalar
+
+ * Fix transpose(arg) when arg is a sequence
+
+ * Refactor include file handling, now all numpy ndarray, ufunc, and umath
+ functions exported from libpypy.so are declared in pypy_numpy.h, which is
+ included only when building our fork of numpy
+
+ * Add broadcast
+
+* Performance improvements:
+
From pypy.commits at gmail.com Mon May 2 22:11:08 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 02 May 2016 19:11:08 -0700 (PDT)
Subject: [pypy-commit] pypy default: revert wrong change in d1f09c46b8e7
Message-ID: <572808bc.4412c30a.1d71d.4b2a@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84163:a3b6760236fc
Date: 2016-05-03 03:09 +0100
http://bitbucket.org/pypy/pypy/changeset/a3b6760236fc/
Log: revert wrong change in d1f09c46b8e7
diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py
--- a/pypy/module/exceptions/test/test_exc.py
+++ b/pypy/module/exceptions/test/test_exc.py
@@ -55,8 +55,7 @@
try:
raise LookupError(1, 2)
- except LookupError as xxx_todo_changeme:
- (one, two) = xxx_todo_changeme.args
+ except LookupError, (one, two):
assert one == 1
assert two == 2
From pypy.commits at gmail.com Mon May 2 22:53:29 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 19:53:29 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: oefmt
Message-ID: <572812a9.08121c0a.2efd9.0158@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84164:f60d1a596389
Date: 2016-05-02 19:52 -0700
http://bitbucket.org/pypy/pypy/changeset/f60d1a596389/
Log: oefmt
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1563,8 +1563,8 @@
from rpython.rlib import rstring
result = self.str_w(w_obj)
if '\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a string without NUL characters")
return rstring.assert_str0(result)
def bytes0_w(self, w_obj):
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -381,8 +381,7 @@
if space.is_w(w_new, space.w_None):
w_new = None
elif not space.isinstance_w(w_new, space.w_dict):
- msg = "__kwdefaults__ must be a dict"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "__kwdefaults__ must be a dict")
self.w_kw_defs = w_new
def fdel_func_kwdefaults(self, space):
@@ -414,9 +413,8 @@
self.qualname = space.unicode_w(w_name)
except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(space.w_TypeError,
- space.wrap("__qualname__ must be set "
- "to a string object"))
+ raise oefmt(space.w_TypeError,
+ "__qualname__ must be set to a string object")
raise
def fdel_func_doc(self, space):
@@ -471,8 +469,7 @@
if space.is_w(w_new, space.w_None):
w_new = None
elif not space.isinstance_w(w_new, space.w_dict):
- msg = "__annotations__ must be a dict"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError, "__annotations__ must be a dict")
self.w_ann = w_new
def fdel_func_annotations(self, space):
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -754,8 +754,7 @@
w_build_class = self.get_builtin().getdictvalue(
self.space, '__build_class__')
if w_build_class is None:
- raise OperationError(self.space.w_ImportError,
- self.space.wrap("__build_class__ not found"))
+ raise oefmt(self.space.w_ImportError, "__build_class__ not found")
self.pushvalue(w_build_class)
def STORE_NAME(self, varindex, next_instr):
@@ -919,11 +918,9 @@
if space.isinstance_w(w_2, space.w_tuple):
for w_type in space.fixedview(w_2):
if not space.exception_is_valid_class_w(w_type):
- raise OperationError(space.w_TypeError,
- space.wrap(CANNOT_CATCH_MSG))
+ raise oefmt(space.w_TypeError, CANNOT_CATCH_MSG)
elif not space.exception_is_valid_class_w(w_2):
- raise OperationError(space.w_TypeError,
- space.wrap(CANNOT_CATCH_MSG))
+ raise oefmt(space.w_TypeError, CANNOT_CATCH_MSG)
return space.newbool(space.exception_match(w_1, w_2))
def COMPARE_OP(self, testnum, next_instr):
@@ -970,8 +967,7 @@
w_import = self.get_builtin().getdictvalue(space, '__import__')
if w_import is None:
- raise OperationError(space.w_ImportError,
- space.wrap("__import__ not found"))
+ raise oefmt(space.w_ImportError, "__import__ not found")
d = self.getdebug()
if d is None:
w_locals = None
diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py
--- a/pypy/interpreter/pyparser/parsestring.py
+++ b/pypy/interpreter/pyparser/parsestring.py
@@ -80,8 +80,8 @@
# Disallow non-ascii characters (but not escapes)
for c in substr:
if ord(c) > 0x80:
- raise OperationError(space.w_SyntaxError, space.wrap(
- 'bytes can only contain ASCII literal characters.'))
+ raise oefmt(space.w_SyntaxError,
+ "bytes can only contain ASCII literal characters.")
if rawmode or '\\' not in substr:
if not unicode_literal:
diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py
--- a/pypy/module/__builtin__/descriptor.py
+++ b/pypy/module/__builtin__/descriptor.py
@@ -56,30 +56,26 @@
frame = ec.gettopframe()
code = frame.pycode
if not code:
- raise OperationError(space.w_RuntimeError, space.wrap(
- "super(): no code object"))
+ raise oefmt(space.w_RuntimeError, "super(): no code object")
if code.co_argcount == 0:
- raise OperationError(space.w_RuntimeError, space.wrap(
- "super(): no arguments"))
+ raise oefmt(space.w_RuntimeError, "super(): no arguments")
w_obj = frame.locals_cells_stack_w[0]
if not w_obj:
- raise OperationError(space.w_RuntimeError, space.wrap(
- "super(): arg[0] deleted"))
+ raise oefmt(space.w_RuntimeError, "super(): arg[0] deleted")
index = 0
for name in code.co_freevars:
if name == "__class__":
break
index += 1
else:
- raise OperationError(space.w_RuntimeError, space.wrap(
- "super(): __class__ cell not found"))
+ raise oefmt(space.w_RuntimeError,
+ "super(): __class__ cell not found")
# a kind of LOAD_DEREF
cell = frame._getcell(len(code.co_cellvars) + index)
try:
w_starttype = cell.get()
except ValueError:
- raise OperationError(space.w_RuntimeError, space.wrap(
- "super(): empty __class__ cell"))
+ raise oefmt(space.w_RuntimeError, "super(): empty __class__ cell")
w_obj_or_type = w_obj
if space.is_none(w_obj_or_type):
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -388,8 +388,8 @@
pass # We know it's not zero
else:
if step == 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "step argument must not be zero"))
+ raise oefmt(space.w_ValueError,
+ "step argument must not be zero")
w_length = compute_range_length(space, w_start, w_stop, w_step)
obj = space.allocate_instance(W_Range, w_subtype)
W_Range.__init__(obj, w_start, w_stop, w_step, w_length, promote_step)
@@ -688,7 +688,9 @@
iterator_w = space.iter(iterable_w)
except OperationError as e:
if e.match(self.space, self.space.w_TypeError):
- raise OperationError(space.w_TypeError, space.wrap(self._error_name + " argument #" + str(i + 1) + " must support iteration"))
+ raise oefmt(space.w_TypeError,
+ "%s argument #%d must support iteration",
+ self._error_name, i + 1)
else:
raise
else:
@@ -731,8 +733,8 @@
def W_Map___new__(space, w_subtype, w_fun, args_w):
if len(args_w) == 0:
- raise OperationError(space.w_TypeError,
- space.wrap("map() must have at least two arguments"))
+ raise oefmt(space.w_TypeError,
+ "map() must have at least two arguments")
r = space.allocate_instance(W_Map, w_subtype)
r.__init__(space, w_fun, args_w)
return space.wrap(r)
diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py
--- a/pypy/module/_socket/interp_func.py
+++ b/pypy/module/_socket/interp_func.py
@@ -2,7 +2,7 @@
from rpython.rlib.rsocket import SocketError, INVALID_SOCKET
from rpython.rlib.rarithmetic import intmask
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from pypy.module._socket.interp_socket import (
converted_error, W_Socket, addr_as_object, fill_from_object, get_error,
@@ -131,9 +131,8 @@
rsocket.SOCK_DGRAM, 0,
rsocket.AI_NUMERICHOST)
if len(lst) > 1:
- raise OperationError(
- get_error(space, 'error'),
- space.wrap("sockaddr resolved to multiple addresses"))
+ raise oefmt(get_error(space, 'error'),
+ "sockaddr resolved to multiple addresses")
addr = lst[0][4]
fill_from_object(addr, space, w_sockaddr)
host, servport = rsocket.getnameinfo(addr, flags)
diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py
--- a/pypy/module/_sre/interp_sre.py
+++ b/pypy/module/_sre/interp_sre.py
@@ -111,8 +111,9 @@
unicodestr = space.unicode_w(w_string)
if not (space.is_none(self.w_pattern) or
space.isinstance_w(self.w_pattern, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- "can't use a bytes pattern on a string-like object"))
+ raise oefmt(space.w_TypeError,
+ "can't use a bytes pattern on a string-like "
+ "object")
if pos > len(unicodestr):
pos = len(unicodestr)
if endpos > len(unicodestr):
@@ -122,8 +123,9 @@
elif space.isinstance_w(w_string, space.w_str):
if (not space.is_none(self.w_pattern) and
space.isinstance_w(self.w_pattern, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- "can't use a string pattern on a bytes-like object"))
+ raise oefmt(space.w_TypeError,
+ "can't use a string pattern on a bytes-like "
+ "object")
str = space.str_w(w_string)
if pos > len(str):
pos = len(str)
@@ -135,8 +137,9 @@
buf = space.readbuf_w(w_string)
if (not space.is_none(self.w_pattern) and
space.isinstance_w(self.w_pattern, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- "can't use a string pattern on a bytes-like object"))
+ raise oefmt(space.w_TypeError,
+ "can't use a string pattern on a bytes-like "
+ "object")
size = buf.getlength()
assert size >= 0
if pos > size:
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -239,8 +239,7 @@
def _RAND_bytes(space, n, pseudo):
if n < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "num must be positive"))
+ raise oefmt(space.w_ValueError, "num must be positive")
with rffi.scoped_alloc_buffer(n) as buf:
if pseudo:
@@ -1378,9 +1377,9 @@
"encode", space.wrap("idna")))
if hostname and not HAS_SNI:
- raise OperationError(space.w_ValueError,
- space.wrap("server_hostname is not supported "
- "by your OpenSSL library"))
+ raise oefmt(space.w_ValueError,
+ "server_hostname is not supported by your OpenSSL "
+ "library")
return new_sslobject(space, self.ctx, w_sock, server_side, hostname)
diff --git a/pypy/module/array/reconstructor.py b/pypy/module/array/reconstructor.py
--- a/pypy/module/array/reconstructor.py
+++ b/pypy/module/array/reconstructor.py
@@ -3,7 +3,7 @@
# from its memory representation.
import sys
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.argument import Arguments
from rpython.rlib import runicode, rbigint
from rpython.rlib.rstruct import ieee
@@ -80,12 +80,10 @@
space, w_cls, typecode, Arguments(space, [w_items]))
if typecode not in interp_array.types:
- raise OperationError(space.w_ValueError,
- space.wrap("invalid type code"))
+ raise oefmt(space.w_ValueError, "invalid type code")
if (mformat_code < MACHINE_FORMAT_CODE_MIN or
mformat_code > MACHINE_FORMAT_CODE_MAX):
- raise OperationError(space.w_ValueError,
- space.wrap("invalid machine format code"))
+ raise oefmt(space.w_ValueError, "invalid machine format code")
# Slow path: Decode the byte string according to the given machine
# format code. This occurs when the computer unpickling the array
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -441,8 +441,7 @@
w_obj = PyUnicode_FromObject(space, w_obj)
w_output = space.fsencode(w_obj)
if not space.isinstance_w(w_output, space.w_bytes):
- raise OperationError(space.w_TypeError,
- space.wrap("encoder failed to return bytes"))
+ raise oefmt(space.w_TypeError, "encoder failed to return bytes")
data = space.bytes0_w(w_output) # Check for NUL bytes
result[0] = make_ref(space, w_output)
return Py_CLEANUP_SUPPORTED
@@ -465,8 +464,7 @@
w_obj = PyBytes_FromObject(space, w_obj)
w_output = space.fsdecode(w_obj)
if not space.isinstance_w(w_output, space.w_unicode):
- raise OperationError(space.w_TypeError,
- space.wrap("decoder failed to return unicode"))
+ raise oefmt(space.w_TypeError, "decoder failed to return unicode")
data = space.unicode0_w(w_output) # Check for NUL bytes
result[0] = make_ref(space, w_output)
return Py_CLEANUP_SUPPORTED
diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py
--- a/pypy/module/exceptions/interp_exceptions.py
+++ b/pypy/module/exceptions/interp_exceptions.py
@@ -172,9 +172,9 @@
if space.is_w(w_newcause, space.w_None):
w_newcause = None
elif not space.exception_is_valid_class_w(space.type(w_newcause)):
- raise OperationError(space.w_TypeError, space.wrap(
- "exception cause must be None or "
- "derive from BaseException"))
+ raise oefmt(space.w_TypeError,
+ "exception cause must be None or derive from "
+ "BaseException")
self.w_cause = w_newcause
self.suppress_context = True
@@ -184,9 +184,9 @@
def descr_setcontext(self, space, w_newcontext):
if not (space.is_w(w_newcontext, space.w_None) or
space.exception_is_valid_class_w(space.type(w_newcontext))):
- raise OperationError(space.w_TypeError, space.wrap(
- "exception context must be None or "
- "derive from BaseException"))
+ raise oefmt(space.w_TypeError,
+ "exception context must be None or derive from "
+ "BaseException")
self.w_context = w_newcontext
def descr_gettraceback(self, space):
@@ -319,9 +319,9 @@
self.w_name = kw_w.pop('name', space.w_None)
self.w_path = kw_w.pop('path', space.w_None)
if kw_w:
- raise OperationError(space.w_TypeError, space.wrap(
- # CPython displays this, but it's not quite right.
- "ImportError does not take keyword arguments"))
+ # CPython displays this, but it's not quite right.
+ raise oefmt(space.w_TypeError,
+ "ImportError does not take keyword arguments")
W_Exception.descr_init(self, space, args_w)
@@ -571,8 +571,7 @@
def descr_get_written(self, space):
if self.written == -1:
- raise OperationError(space.w_AttributeError,
- space.wrap("characters_written"))
+ raise oefmt(space.w_AttributeError, "characters_written")
return space.wrap(self.written)
def descr_set_written(self, space, w_written):
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
--- a/pypy/module/itertools/interp_itertools.py
+++ b/pypy/module/itertools/interp_itertools.py
@@ -1429,8 +1429,7 @@
def descr_setstate(self, space, w_state):
indices_w = space.fixedview(w_state)
if len(indices_w) != self.r:
- raise OperationError(space.w_ValueError, space.wrap(
- "invalid arguments"))
+ raise oefmt(space.w_ValueError, "invalid arguments")
for i in range(self.r):
index = space.int_w(indices_w[i])
max = self.get_maximum(i)
diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py
--- a/pypy/module/math/interp_math.py
+++ b/pypy/module/math/interp_math.py
@@ -390,7 +390,7 @@
# Python 2.x (and thus ll_math) raises a OverflowError improperly.
if not e.match(space, space.w_OverflowError):
raise
- raise OperationError(space.w_ValueError, space.wrap("math domain error"))
+ raise oefmt(space.w_ValueError, "math domain error")
def acosh(space, w_x):
"""Inverse hyperbolic cosine"""
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -256,5 +256,5 @@
same value."""
if space.is_w(space.type(w_str), space.w_unicode):
return space.new_interned_w_str(w_str)
- raise OperationError(space.w_TypeError, space.wrap("intern() argument must be string."))
+ raise oefmt(space.w_TypeError, "intern() argument must be string.")
diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py
--- a/pypy/module/thread/os_lock.py
+++ b/pypy/module/thread/os_lock.py
@@ -88,8 +88,8 @@
try:
self.lock.release()
except rthread.error:
- raise OperationError(space.w_RuntimeError, space.wrap(
- "cannot release un-acquired lock"))
+ raise oefmt(space.w_RuntimeError,
+ "cannot release un-acquired lock")
def descr_lock_locked(self, space):
"""Return whether the lock is in the locked state."""
@@ -183,8 +183,8 @@
try:
self.rlock_count = ovfcheck(self.rlock_count + 1)
except OverflowError:
- raise OperationError(space.w_OverflowError, space.wrap(
- 'internal lock count overflowed'))
+ raise oefmt(space.w_OverflowError,
+ "internal lock count overflowed")
return space.w_True
r = True
@@ -212,8 +212,8 @@
to be available for other threads."""
tid = rthread.get_ident()
if self.rlock_count == 0 or self.rlock_owner != tid:
- raise OperationError(space.w_RuntimeError, space.wrap(
- "cannot release un-acquired lock"))
+ raise oefmt(space.w_RuntimeError,
+ "cannot release un-acquired lock")
self.rlock_count -= 1
if self.rlock_count == 0:
self.rlock_owner == 0
@@ -245,8 +245,8 @@
def release_save_w(self, space):
"""For internal use by `threading.Condition`."""
if self.rlock_count == 0:
- raise OperationError(space.w_RuntimeError, space.wrap(
- "cannot release un-acquired lock"))
+ raise oefmt(space.w_RuntimeError,
+ "cannot release un-acquired lock")
count, self.rlock_count = self.rlock_count, 0
owner, self.rlock_owner = self.rlock_owner, 0
self.lock.release()
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -484,25 +484,19 @@
representation by some bad index (fixes bug #897625). No check for
year or wday since handled in _gettmarg()."""
if not 0 <= rffi.getintfield(t_ref, 'c_tm_mon') <= 11:
- raise OperationError(space.w_ValueError,
- space.wrap("month out of range"))
+ raise oefmt(space.w_ValueError, "month out of range")
if not 1 <= rffi.getintfield(t_ref, 'c_tm_mday') <= 31:
- raise OperationError(space.w_ValueError,
- space.wrap("day of month out of range"))
+ raise oefmt(space.w_ValueError, "day of month out of range")
if not 0 <= rffi.getintfield(t_ref, 'c_tm_hour') <= 23:
- raise OperationError(space.w_ValueError,
- space.wrap("hour out of range"))
+ raise oefmt(space.w_ValueError, "hour out of range")
if not 0 <= rffi.getintfield(t_ref, 'c_tm_min') <= 59:
- raise OperationError(space.w_ValueError,
- space.wrap("minute out of range"))
+ raise oefmt(space.w_ValueError, "minute out of range")
if not 0 <= rffi.getintfield(t_ref, 'c_tm_sec') <= 61:
- raise OperationError(space.w_ValueError,
- space.wrap("seconds out of range"))
+ raise oefmt(space.w_ValueError, "seconds out of range")
# tm_wday does not need checking: "% 7" in _gettmarg() automatically
# restricts the range
if not 0 <= rffi.getintfield(t_ref, 'c_tm_yday') <= 365:
- raise OperationError(space.w_ValueError,
- space.wrap("day of year out of range"))
+ raise oefmt(space.w_ValueError, "day of year out of range")
def time(space):
"""time() -> floating point number
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -422,8 +422,8 @@
except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
- msg = "sequence.index(x): x not in sequence"
- raise OperationError(space.w_ValueError, space.wrap(msg))
+ raise oefmt(space.w_ValueError,
+ "sequence.index(x): x not in sequence")
if space.eq_w(w_next, w_item):
return space.wrap(index)
index += 1
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py
--- a/pypy/objspace/std/bytearrayobject.py
+++ b/pypy/objspace/std/bytearrayobject.py
@@ -468,17 +468,20 @@
if i >= length:
break
if i + 1 == length:
- raise OperationError(space.w_ValueError, space.wrap(
- "non-hexadecimal number found in fromhex() arg at position %d" % i))
+ raise oefmt(space.w_ValueError,
+ "non-hexadecimal number found in fromhex() arg at "
+ "position %d", i)
top = _hex_digit_to_int(s[i])
if top == -1:
- raise OperationError(space.w_ValueError, space.wrap(
- "non-hexadecimal number found in fromhex() arg at position %d" % i))
+ raise oefmt(space.w_ValueError,
+ "non-hexadecimal number found in fromhex() arg at "
+ "position %d", i)
bot = _hex_digit_to_int(s[i+1])
if bot == -1:
- raise OperationError(space.w_ValueError, space.wrap(
- "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,)))
+ raise oefmt(space.w_ValueError,
+ "non-hexadecimal number found in fromhex() arg at "
+ "position %d", i + 1)
data.append(chr(top*16 + bot))
return data
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -650,16 +650,15 @@
value = space.getindex_w(w_value, None)
if not 0 <= value < 256:
# this includes the OverflowError in case the long is too large
- raise OperationError(space.w_ValueError, space.wrap(
- "byte must be in range(0, 256)"))
+ raise oefmt(space.w_ValueError, "byte must be in range(0, 256)")
return chr(value)
def newbytesdata_w(space, w_source, encoding, errors):
# None value
if w_source is None:
if encoding is not None or errors is not None:
- raise OperationError(space.w_TypeError, space.wrap(
- "encoding or errors without string argument"))
+ raise oefmt(space.w_TypeError,
+ "encoding or errors without string argument")
return []
# Some object with __bytes__ special method
w_bytes_method = space.lookup(w_source, "__bytes__")
@@ -678,17 +677,16 @@
raise
else:
if count < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("negative count"))
+ raise oefmt(space.w_ValueError, "negative count")
if encoding is not None or errors is not None:
- raise OperationError(space.w_TypeError, space.wrap(
- "encoding or errors without string argument"))
+ raise oefmt(space.w_TypeError,
+ "encoding or errors without string argument")
return ['\0'] * count
# Unicode with encoding
if space.isinstance_w(w_source, space.w_unicode):
if encoding is None:
- raise OperationError(space.w_TypeError, space.wrap(
- "string argument without an encoding"))
+ raise oefmt(space.w_TypeError,
+ "string argument without an encoding")
from pypy.objspace.std.unicodeobject import encode_object
w_source = encode_object(space, w_source, encoding, errors)
# and continue with the encoded string
@@ -716,9 +714,8 @@
return [c for c in buf.as_str()]
if space.isinstance_w(w_source, space.w_unicode):
- raise OperationError(
- space.w_TypeError,
- space.wrap("cannot convert unicode object to bytes"))
+ raise oefmt(space.w_TypeError,
+ "cannot convert unicode object to bytes")
# sequence of bytes
w_iter = space.iter(w_source)
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py
--- a/pypy/objspace/std/memoryobject.py
+++ b/pypy/objspace/std/memoryobject.py
@@ -164,8 +164,8 @@
if self._hash == -1:
self._check_released(space)
if not self.buf.readonly:
- raise OperationError(space.w_ValueError, space.wrap(
- "cannot hash writable memoryview object"))
+ raise oefmt(space.w_ValueError,
+ "cannot hash writable memoryview object")
self._hash = compute_hash(self.buf.as_str())
return space.wrap(self._hash)
diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
--- a/pypy/objspace/std/unicodeobject.py
+++ b/pypy/objspace/std/unicodeobject.py
@@ -204,13 +204,13 @@
except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- "first maketrans argument must "
- "be a string if there is a second argument"))
+ raise oefmt(space.w_TypeError,
+ "first maketrans argument must be a string if "
+ "there is a second argument")
if len(x) != ylen:
- raise OperationError(space.w_ValueError, space.wrap(
- "the first two maketrans "
- "arguments must have equal length"))
+ raise oefmt(space.w_ValueError,
+ "the first two maketrans arguments must have "
+ "equal length")
# create entries for translating chars in x to those in y
for i in range(len(x)):
w_key = space.newint(ord(x[i]))
@@ -224,9 +224,9 @@
else:
# x must be a dict
if not space.is_w(space.type(w_x), space.w_dict):
- raise OperationError(space.w_TypeError, space.wrap(
- "if you give only one argument "
- "to maketrans it must be a dict"))
+ raise oefmt(space.w_TypeError,
+ "if you give only one argument to maketrans it "
+ "must be a dict")
# copy entries into the new dict, converting string keys to int keys
w_iter = space.iter(space.call_method(w_x, "items"))
while True:
@@ -241,9 +241,9 @@
# convert string keys to integer keys
key = space.unicode_w(w_key)
if len(key) != 1:
- raise OperationError(space.w_ValueError, space.wrap(
- "string keys in translate "
- "table must be of length 1"))
+ raise oefmt(space.w_ValueError,
+ "string keys in translate table must be "
+ "of length 1")
w_key = space.newint(ord(key[0]))
else:
# just keep integer keys
@@ -252,9 +252,9 @@
except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
- raise OperationError(space.w_TypeError, space.wrap(
- "keys in translate table must "
- "be strings or integers"))
+ raise oefmt(space.w_TypeError,
+ "keys in translate table must be strings "
+ "or integers")
space.setitem(w_new, w_key, w_value)
return w_new
From pypy.commits at gmail.com Mon May 2 23:01:33 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 20:01:33 -0700 (PDT)
Subject: [pypy-commit] pypy default: sync w/ py3k
Message-ID: <5728148d.cf8ec20a.d7757.520b@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84165:6fa84e77d63b
Date: 2016-05-02 19:57 -0700
http://bitbucket.org/pypy/pypy/changeset/6fa84e77d63b/
Log: sync w/ py3k
diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py
--- a/pypy/module/__builtin__/compiling.py
+++ b/pypy/module/__builtin__/compiling.py
@@ -35,7 +35,7 @@
if mode not in ('exec', 'eval', 'single'):
raise oefmt(space.w_ValueError,
- "compile() arg 3 must be 'exec' or 'eval' or 'single'")
+ "compile() arg 3 must be 'exec', 'eval' or 'single'")
if space.isinstance_w(w_source, space.gettypeobject(ast.W_AST.typedef)):
ast_node = ast.mod.from_object(space, w_source)
diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py
--- a/pypy/module/posix/interp_posix.py
+++ b/pypy/module/posix/interp_posix.py
@@ -32,7 +32,7 @@
pass
else:
def check_uid_range(space, num):
- if num < -(1<<31) or num >= (1<<32):
+ if num < -(1 << 31) or num >= (1 << 32):
raise oefmt(space.w_OverflowError, "integer out of range")
def fsencode_w(space, w_obj):
From pypy.commits at gmail.com Mon May 2 23:01:35 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 20:01:35 -0700 (PDT)
Subject: [pypy-commit] pypy default: refactor
Message-ID: <5728148f.43ecc20a.ad0d1.4f64@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84166:6479edc9c369
Date: 2016-05-02 19:58 -0700
http://bitbucket.org/pypy/pypy/changeset/6479edc9c369/
Log: refactor
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -51,13 +51,10 @@
or not space.isinstance_w(
space.getitem(w_res, space.wrap(0)),
space.w_unicode)):
- if decode:
- msg = ("decoding error handler must return "
- "(unicode, int) tuple, not %R")
- else:
- msg = ("encoding error handler must return "
- "(unicode, int) tuple, not %R")
- raise oefmt(space.w_TypeError, msg, w_res)
+ raise oefmt(space.w_TypeError,
+ "%s error handler must return (unicode, int) "
+ "tuple, not %R",
+ "decoding" if decode else "encoding", w_res)
w_replace, w_newpos = space.fixedview(w_res, 2)
newpos = space.int_w(w_newpos)
if newpos < 0:
From pypy.commits at gmail.com Mon May 2 23:01:37 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 02 May 2016 20:01:37 -0700 (PDT)
Subject: [pypy-commit] pypy default: None more appropriate
Message-ID: <57281491.43ecc20a.ad0d1.4f68@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84167:7dc2f3884df2
Date: 2016-05-02 19:58 -0700
http://bitbucket.org/pypy/pypy/changeset/7dc2f3884df2/
Log: None more appropriate
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -160,7 +160,7 @@
raise oefmt(space.w_SystemError,
"the bz2 library has received wrong parameters")
elif bzerror == BZ_MEM_ERROR:
- raise OperationError(space.w_MemoryError, space.wrap(""))
+ raise OperationError(space.w_MemoryError, space.w_None)
elif bzerror in (BZ_DATA_ERROR, BZ_DATA_ERROR_MAGIC):
raise oefmt(space.w_IOError, "invalid data stream")
elif bzerror == BZ_IO_ERROR:
From pypy.commits at gmail.com Tue May 3 03:26:04 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 03 May 2016 00:26:04 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: still progressing, slowly
Message-ID: <5728528c.a553c20a.2fb9d.ffff9b0b@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84168:67a03224c02d
Date: 2016-05-03 09:26 +0200
http://bitbucket.org/pypy/pypy/changeset/67a03224c02d/
Log: still progressing, slowly
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -1,6 +1,7 @@
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.debug import ll_assert
+from rpython.rlib.objectmodel import we_are_translated
from rpython.memory.gcheader import GCHeaderBuilder
from rpython.memory.support import DEFAULT_CHUNK_SIZE
from rpython.memory.support import get_address_stack, get_address_deque
@@ -36,8 +37,26 @@
def setup(self):
# all runtime mutable values' setup should happen here
# and in its overriden versions! for the benefit of test_transformed_gc
- self.finalizer_lock_count = 0
- self.run_finalizers = self.AddressDeque()
+ self.finalizer_lock = False
+ if we_are_translated():
+ XXXXXX
+ else:
+ self._finalizer_queue_objects = [] # XXX FIX ME
+
+ def register_finalizer_index(self, fq, index):
+ while len(self._finalizer_queue_objects) <= index:
+ self._finalizer_queue_objects.append(None)
+ if self._finalizer_queue_objects[index] is None:
+ fq._reset()
+ self._finalizer_queue_objects[index] = fq
+ else:
+ assert self._finalizer_queue_objects[index] is fq
+
+ def add_finalizer_to_run(self, fq_index, obj):
+ if we_are_translated():
+ XXXXXX
+ else:
+ self._finalizer_queue_objects[fq_index]._queue.append(obj)
def post_setup(self):
# More stuff that needs to be initialized when the GC is already
@@ -60,6 +79,7 @@
def set_query_functions(self, is_varsize, has_gcptr_in_varsize,
is_gcarrayofgcptr,
+ finalizer_trigger,
destructor_or_custom_trace,
offsets_to_gc_pointers,
fixed_size, varsize_item_sizes,
@@ -73,6 +93,7 @@
fast_path_tracing,
has_gcptr,
cannot_pin):
+ self.finalizer_trigger = finalizer_trigger
self.destructor_or_custom_trace = destructor_or_custom_trace
self.is_varsize = is_varsize
self.has_gcptr_in_varsize = has_gcptr_in_varsize
@@ -320,9 +341,17 @@
callback2, attrname = _convert_callback_formats(callback) # :-/
setattr(self, attrname, arg)
self.root_walker.walk_roots(callback2, callback2, callback2)
- self.run_finalizers.foreach(callback, arg)
+ self.enum_pending_finalizers(callback, arg)
enumerate_all_roots._annspecialcase_ = 'specialize:arg(1)'
+ def enum_pending_finalizers(self, callback, arg):
+ if we_are_translated():
+ XXXXXX #. foreach(callback, arg)
+ for fq in self._finalizer_queue_objects:
+ for obj in fq._queue:
+ callback(obj, arg)
+ enum_pending_finalizers._annspecialcase_ = 'specialize:arg(1)'
+
def debug_check_consistency(self):
"""To use after a collection. If self.DEBUG is set, this
enumerates all roots and traces all objects to check if we didn't
@@ -362,17 +391,17 @@
pass
def execute_finalizers(self):
- self.finalizer_lock_count += 1
+ if self.finalizer_lock:
+ return # the outer invocation of execute_finalizers() will do it
+ self.finalizer_lock = True
try:
- while self.run_finalizers.non_empty():
- if self.finalizer_lock_count > 1:
- # the outer invocation of execute_finalizers() will do it
- break
- obj = self.run_finalizers.popleft()
- finalizer = self.getfinalizer(self.get_type_id(obj))
- finalizer(obj)
+ if we_are_translated():
+ XXXXXX
+ for i, fq in enumerate(self._finalizer_queue_objects):
+ if len(fq._queue) > 0:
+ self.finalizer_trigger(i)
finally:
- self.finalizer_lock_count -= 1
+ self.finalizer_lock = False
class MovingGCBase(GCBase):
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2422,7 +2422,7 @@
#
# If we are in an inner collection caused by a call to a finalizer,
# the 'run_finalizers' objects also need to be kept alive.
- self.run_finalizers.foreach(self._collect_obj, None)
+ self.enum_pending_finalizers(self._collect_obj, None)
def enumerate_all_roots(self, callback, arg):
self.prebuilt_root_objects.foreach(callback, arg)
@@ -2676,8 +2676,9 @@
state = self._finalization_state(x)
ll_assert(state >= 2, "unexpected finalization state < 2")
if state == 2:
- # XXX use fq_nr here
- self.run_finalizers.append(x)
+ from rpython.rtyper.lltypesystem import rffi
+ fq_index = rffi.cast(lltype.Signed, fq_nr)
+ self.add_finalizer_to_run(fq_index, x)
# we must also fix the state from 2 to 3 here, otherwise
# we leave the GCFLAG_FINALIZATION_ORDERING bit behind
# which will confuse the next collection
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -83,6 +83,12 @@
ANY = (T_HAS_GCPTR | T_IS_WEAKREF)
return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.customfunc)
+ def init_finalizer_trigger(self, finalizer_trigger):
+ self.finalizer_trigger = finalizer_trigger
+
+ def q_finalizer_trigger(self, fq_index):
+ self.finalizer_trigger(fq_index)
+
def q_destructor_or_custom_trace(self, typeid):
return self.get(typeid).customfunc
@@ -136,6 +142,7 @@
self.q_is_varsize,
self.q_has_gcptr_in_varsize,
self.q_is_gcarrayofgcptr,
+ self.q_finalizer_trigger,
self.q_destructor_or_custom_trace,
self.q_offsets_to_gc_pointers,
self.q_fixed_size,
@@ -374,13 +381,17 @@
return result
def make_destructor_funcptr_for_type(self, TYPE):
- # must be overridden for proper finalizer support
+ # must be overridden for proper destructor support
return None
def make_custom_trace_funcptr_for_type(self, TYPE):
# must be overridden for proper custom tracer support
return None
+ def make_finalizer_trigger(self):
+ # must be overridden for proper finalizer support
+ return None
+
def initialize_gc_query_function(self, gc):
gcdata = GCData(self.type_info_group)
gcdata.set_query_functions(gc)
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -1,6 +1,6 @@
from rpython.translator.backendopt.finalizer import FinalizerAnalyzer
from rpython.rtyper.lltypesystem import lltype, llmemory, llheap
-from rpython.rtyper import llinterp
+from rpython.rtyper import llinterp, rclass
from rpython.rtyper.annlowlevel import llhelper
from rpython.memory import gctypelayout
from rpython.flowspace.model import Constant
@@ -16,12 +16,14 @@
chunk_size = 10,
translated_to_c = False,
**GC_PARAMS)
+ self.translator = translator
self.gc.set_root_walker(LLInterpRootWalker(self))
self.gc.DEBUG = True
self.llinterp = llinterp
self.prepare_graphs(flowgraphs)
self.gc.setup()
- self.finalizer_queues = {}
+ self.finalizer_queue_indexes = {}
+ self.finalizer_queues = []
self.has_write_barrier_from_array = hasattr(self.gc,
'write_barrier_from_array')
@@ -32,6 +34,7 @@
self.llinterp)
self.get_type_id = layoutbuilder.get_type_id
gcdata = layoutbuilder.initialize_gc_query_function(self.gc)
+ gcdata.init_finalizer_trigger(self.finalizer_trigger)
constants = collect_constants(flowgraphs)
for obj in constants:
@@ -189,18 +192,38 @@
def thread_run(self):
pass
+ def finalizer_trigger(self, fq_index):
+ fq = self.finalizer_queues[fq_index]
+ graph = self.translator._graphof(fq.finalizer_trigger.im_func)
+ try:
+ self.llinterp.eval_graph(graph, [None], recursive=True)
+ except llinterp.LLException:
+ raise RuntimeError(
+ "finalizer_trigger() raised an exception, shouldn't happen")
+
def get_finalizer_queue_index(self, fq_tag):
assert fq_tag.expr == 'FinalizerQueue TAG'
fq = fq_tag.default
- return self.finalizer_queues.setdefault(fq, len(self.finalizer_queues))
+ try:
+ index = self.finalizer_queue_indexes[fq]
+ except KeyError:
+ index = len(self.finalizer_queue_indexes)
+ assert index == len(self.finalizer_queues)
+ self.finalizer_queue_indexes[fq] = index
+ self.finalizer_queues.append(fq)
+ return (fq, index)
def gc_fq_next_dead(self, fq_tag):
- index = self.get_finalizer_queue_index(fq_tag)
- xxx
+ fq, _ = self.get_finalizer_queue_index(fq_tag)
+ addr = fq.next_dead()
+ if addr is None:
+ addr = llmemory.NULL
+ return llmemory.cast_adr_to_ptr(addr, rclass.OBJECTPTR)
def gc_fq_register(self, fq_tag, ptr):
- index = self.get_finalizer_queue_index(fq_tag)
+ fq, index = self.get_finalizer_queue_index(fq_tag)
ptr = lltype.cast_opaque_ptr(llmemory.GCREF, ptr)
+ self.gc.register_finalizer_index(fq, index)
self.gc.register_finalizer(index, ptr)
# ____________________________________________________________
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -390,7 +390,8 @@
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.rclass import OBJECTPTR
from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
- ptr = llop.gc_fq_next_dead(OBJECTPTR, self._get_tag())
+ tag = FinalizerQueue._get_tag(self)
+ ptr = llop.gc_fq_next_dead(OBJECTPTR, tag)
return cast_base_ptr_to_instance(self.Class, ptr)
try:
return self._queue.popleft()
@@ -404,24 +405,27 @@
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.rclass import OBJECTPTR
from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr
+ tag = FinalizerQueue._get_tag(self)
ptr = cast_instance_to_base_ptr(obj)
- llop.gc_fq_register(lltype.Void, self._get_tag(), ptr)
+ llop.gc_fq_register(lltype.Void, tag, ptr)
return
else:
self._untranslated_register_finalizer(obj)
- @specialize.memo()
def _get_tag(self):
- return CDefinedIntSymbolic('FinalizerQueue TAG', default=self)
+ "NOT_RPYTHON: special-cased below"
+
+ def _reset(self):
+ import collections
+ self._weakrefs = set()
+ self._queue = collections.deque()
def _untranslated_register_finalizer(self, obj):
if hasattr(obj, '__enable_del_for_id'):
return # already called
if not hasattr(self, '_queue'):
- import collections
- self._weakrefs = set()
- self._queue = collections.deque()
+ self._reset()
# Fetch and check the type of 'obj'
objtyp = obj.__class__
@@ -483,6 +487,23 @@
_fq_patched_classes = set()
+class FqTagEntry(ExtRegistryEntry):
+ _about_ = FinalizerQueue._get_tag.im_func
+
+ def compute_result_annotation(self, s_fq):
+ assert s_fq.is_constant()
+ fq = s_fq.const
+ s_func = self.bookkeeper.immutablevalue(fq.finalizer_trigger)
+ self.bookkeeper.emulate_pbc_call(self.bookkeeper.position_key,
+ s_func, [])
+ if not hasattr(fq, '_fq_tag'):
+ fq._fq_tag = CDefinedIntSymbolic('FinalizerQueue TAG', default=fq)
+ return self.bookkeeper.immutablevalue(fq._fq_tag)
+
+ def specialize_call(self, hop):
+ hop.exception_cannot_occur()
+ return hop.inputconst(lltype.Signed, hop.s_result.const)
+
# ____________________________________________________________
From pypy.commits at gmail.com Tue May 3 06:39:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 03 May 2016 03:39:42 -0700 (PDT)
Subject: [pypy-commit] pypy.org extradoc: update the values
Message-ID: <57287fee.4ca51c0a.2cbe3.ffffa048@mx.google.com>
Author: Armin Rigo
Branch: extradoc
Changeset: r743:71a15c30baf9
Date: 2016-05-03 12:40 +0200
http://bitbucket.org/pypy/pypy.org/changeset/71a15c30baf9/
Log: update the values
diff --git a/don1.html b/don1.html
--- a/don1.html
+++ b/don1.html
@@ -15,7 +15,7 @@
- $63928 of $105000 (60.9%)
+ $63957 of $105000 (60.9%)
@@ -23,7 +23,7 @@
This donation goes towards supporting Python 3 in PyPy.
Current status:
- we have $8995 left
+ we have $9021 left
in the account. Read proposal
From pypy.commits at gmail.com Tue May 3 06:39:49 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 03 May 2016 03:39:49 -0700 (PDT)
Subject: [pypy-commit] pypy default: document branch
Message-ID: <57287ff5.e109c20a.524b3.fffff427@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84169:4d09e9d8eb6e
Date: 2016-05-03 12:39 +0200
http://bitbucket.org/pypy/pypy/changeset/4d09e9d8eb6e/
Log: document branch
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -61,3 +61,8 @@
calls PyXxx", we now silently acquire/release the GIL. Helps with
CPython C extension modules that call some PyXxx() functions without
holding the GIL (arguably, they are theoretically buggy).
+
+.. branch: cpyext-test-A
+
+Get the cpyext tests to pass with "-A" (i.e. when tested directly with
+CPython).
From pypy.commits at gmail.com Tue May 3 10:37:21 2016
From: pypy.commits at gmail.com (mattip)
Date: Tue, 03 May 2016 07:37:21 -0700 (PDT)
Subject: [pypy-commit] pypy.org extradoc: update and rebuild
Message-ID: <5728b7a1.d1981c0a.f1fc1.155e@mx.google.com>
Author: Matti Picus
Branch: extradoc
Changeset: r744:abd477cc44a9
Date: 2016-05-03 17:37 +0300
http://bitbucket.org/pypy/pypy.org/changeset/abd477cc44a9/
Log: update and rebuild
diff --git a/download.html b/download.html
--- a/download.html
+++ b/download.html
@@ -74,7 +74,7 @@
performance improvements.
We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for:
@@ -116,18 +116,18 @@
Python2.7 compatible PyPy 5.1
-- Linux x86 binary (32bit, tar.bz2 built on Ubuntu 12.04 - 14.04) (see [1] below)
-- Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04) (see [1] below)
-- ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian) (see [1] below)
-- ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring) (see [1] below)
-- ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise) (see [1] below)
-- Mac OS/X binary (64bit)
+- Linux x86 binary (32bit, tar.bz2 built on Ubuntu 12.04 - 14.04) (see [1] below)
+- Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04) (see [1] below)
+- ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian) (see [1] below)
+- ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring) (see [1] below)
+- ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise) (see [1] below)
+- Mac OS/X binary (64bit)
- FreeBSD 9.2 x86 64 bit (hopefully available soon) (see [1] below)
-- Windows binary (32bit) (you might need the VS 2008 runtime library
+
- Windows binary (32bit) (you might need the VS 2008 runtime library
installer vcredist_x86.exe.)
-- PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
-- PowerPC PPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below)
-- Source (tar.bz2); Source (zip). See below for more about the sources.
+- PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
+- PowerPC PPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below)
+- Source (tar.bz2); Source (zip). See below for more about the sources.
- All our downloads, including previous versions. We also have a
mirror, but please use only if you have troubles accessing the links above
@@ -196,7 +196,7 @@
uncompressed, they run in-place. For now you can uncompress them
either somewhere in your home directory or, say, in
/opt, and
if you want, put a symlink from somewhere like
-
/usr/local/bin/pypy to
/path/to/pypy-5.1.0/bin/pypy. Do
+
/usr/local/bin/pypy to
/path/to/pypy-5.1.1/bin/pypy. Do
not move or copy the executable
pypy outside the tree – put
a symlink to it, otherwise it will not find its libraries.
@@ -217,10 +217,7 @@
If you have pip:
pypy -m pip install git+https://bitbucket.org/pypy/numpy.git
-pypy -m pip install git+https://bitbucket.org/pypy/numpy.git@pypy-5.1
-(the second version selects a particular tag, which may be needed if your
-pypy is not the latest development version.)
Alternatively, the direct way:
git clone https://bitbucket.org/pypy/numpy.git
@@ -241,7 +238,7 @@
Get the source code. The following packages contain the source at
the same revision as the above binaries:
Or you can checkout the current trunk using Mercurial (the trunk
usually works and is of course more up-to-date):
@@ -379,6 +376,19 @@
Checksums
Here are the checksums for each of the downloads
+
pypy-5.1.1 md5:
+
+3fa98eb80ef5caa5a6f9d4468409a632 pypy-5.1.1-linux64.tar.bz2
+1d5874f076d18ecd4fd50054cca0c383 pypy-5.1.1-linux-armel.tar.bz2
+9e47e370d57293074bbef6c4c0c4736d pypy-5.1.1-linux-armhf-raring.tar.bz2
+b6643215abc92ed8efd94e6205305a36 pypy-5.1.1-linux-armhf-raspbian.tar.bz2
+224e4d5870d88fb444d8f4f1791140e5 pypy-5.1.1-linux.tar.bz2
+e35510b39e34f1c2199c283bf8655e5c pypy-5.1.1-osx64.tar.bz2
+9d8b82448416e0203efa325364f759e8 pypy-5.1.1-s390x.tar.bz2
+7aff685c28941fda6a74863c53931e38 pypy-5.1.1-src.tar.bz2
+ee9795d8638d34126ca24e4757a73056 pypy-5.1.1-src.zip
+d70b4385fbf0a5e5260f6b7bedb231d4 pypy-5.1.1-win32.zip
+
pypy-5.1.0 md5:
17baf9db5200559b9d6c45ec8f60ea48 pypy-5.1.0-linux-armel.tar.bz2
@@ -409,6 +419,19 @@
2c9f0054f3b93a6473f10be35277825a pypy-1.8-sandbox-linux64.tar.bz2
009c970b5fa75754ae4c32a5d108a8d4 pypy-1.8-sandbox-linux.tar.bz2
+
pypy-5.1.1 sha1:
+
+9ffc1fe9dfeec77a705b0d1af257da7e87894f5a pypy-5.1.1-linux64.tar.bz2
+e432b157bc4cd2b5a21810ff45fd9a1507e8b8bf pypy-5.1.1-linux-armel.tar.bz2
+5ed85f83566a4de5838c8b549943cb79250386ad pypy-5.1.1-linux-armhf-raring.tar.bz2
+ddd1c20e049fcbc01f2bd9173ad77033540722a9 pypy-5.1.1-linux-armhf-raspbian.tar.bz2
+6767056bb71081bce8fcee04de0d0be02d71d4f9 pypy-5.1.1-linux.tar.bz2
+734eb82489d57a3b2b55d6b83153b3972dc6781d pypy-5.1.1-osx64.tar.bz2
+2440d613430f9dfc57bc8db5cfd087f1169ee2d0 pypy-5.1.1-s390x.tar.bz2
+34eca157e025e65f9dc1f419fa56ce31ad635e9c pypy-5.1.1-src.tar.bz2
+95596b62cf2bb6ebd4939584040e713ceec9ef0a pypy-5.1.1-src.zip
+3694e37c1cf6a2a938c108ee69126e4f40a0886e pypy-5.1.1-win32.zip
+
pypy-5.1.0 sha1:
114d4f981956b83cfbc0a3c819fdac0b0550cd82 pypy-5.1.0-linux-armel.tar.bz2
@@ -422,6 +445,19 @@
a184ef5ada93d53e8dc4a9850a9ed764bd661d7b pypy-5.1.0-src.zip
4daba0932afcc4755d93d55aa3cbdd851da9198d pypy-5.1.0-win32.zip
+
pypy-5.1.1 sha256:
+
+c852622e8bc81618c137da35fcf57b2349b956c07b6fd853300846e3cefa64fc pypy-5.1.1-linux64.tar.bz2
+062b33641c24dfc8c6b5af955c2ddf3815b471de0af4bfc343020651b94d13bf pypy-5.1.1-linux-armel.tar.bz2
+c4bcdabccd15669ea44d1c715cd36b2ca55b340a27b63e1a92ef5ab6eb158a8d pypy-5.1.1-linux-armhf-raring.tar.bz2
+fc2a1f8719a7eca5d85d0bdcf499c6ab7409fc32aa312435bcbe66950b47e863 pypy-5.1.1-linux-armhf-raspbian.tar.bz2
+7951fd2b87c9e621ec57c932c20da2b8a4a9e87d8daeb9e2b7373f9444219abc pypy-5.1.1-linux.tar.bz2
+fe2bbb7cf95eb91b1724029f81e85d1dbb6025a2e9a005cfe7258fe07602f771 pypy-5.1.1-osx64.tar.bz2
+4acd1066e07eb668665b302bf8e9338b6df136082c5ce28c62b70c6bb1b5cf9f pypy-5.1.1-s390x.tar.bz2
+99aff0c710c46903b821c7c436f9cb9de16bd7370d923f99cc7c28a66be6c5b2 pypy-5.1.1-src.tar.bz2
+7c0c5157e7977674aa942de3c20ff0567f7af986824f6674e2424f6089c41501 pypy-5.1.1-src.zip
+22a780e328ef053e098f2edc2302957ac3119adf7bf11ff23e225931806e7bcd pypy-5.1.1-win32.zip
+
pypy-5.1.0 sha256:
ea7017449ff0630431866423220c3688fc55c1a0b80a96af0ae138dd0751b81c pypy-5.1.0-linux-armel.tar.bz2
diff --git a/source/download.txt b/source/download.txt
--- a/source/download.txt
+++ b/source/download.txt
@@ -258,9 +258,9 @@
1. Get the source code. The following packages contain the source at
the same revision as the above binaries:
- * `pypy-5.1.0-src.tar.bz2`__ (sources)
+ * `pypy-5.1.1-src.tar.bz2`__ (sources)
- .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-src.tar.bz2
+ .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.1-src.tar.bz2
Or you can checkout the current trunk using Mercurial_ (the trunk
usually works and is of course more up-to-date)::
From pypy.commits at gmail.com Tue May 3 11:41:23 2016
From: pypy.commits at gmail.com (marky1991)
Date: Tue, 03 May 2016 08:41:23 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Update islice to not accept floats to
match cpython and update own tests to match islice's reference-holding
behavior in cpython.
Message-ID: <5728c6a3.161b1c0a.f0840.ffffbc2b@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r84170:acbc6e257771
Date: 2016-05-01 15:58 -0400
http://bitbucket.org/pypy/pypy/changeset/acbc6e257771/
Log: Update islice to not accept floats to match cpython and update own
tests to match islice's reference-holding behavior in cpython.
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
--- a/pypy/module/itertools/interp_itertools.py
+++ b/pypy/module/itertools/interp_itertools.py
@@ -319,7 +319,7 @@
def arg_int_w(self, w_obj, minimum, errormsg):
space = self.space
try:
- result = space.int_w(space.int(w_obj)) # CPython allows floats as parameters
+ result = space.int_w(w_obj)
except OperationError, e:
if e.async(space):
raise
diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py
--- a/pypy/module/itertools/test/test_itertools.py
+++ b/pypy/module/itertools/test/test_itertools.py
@@ -198,11 +198,8 @@
assert next(it) == x
raises(StopIteration, next, it)
- # CPython implementation allows floats
- it = itertools.islice([1, 2, 3, 4, 5], 0.0, 3.0, 2.0)
- for x in [1, 3]:
- assert next(it) == x
- raises(StopIteration, next, it)
+ #Do not allow floats
+ raises(ValueError, itertools.islice, [1, 2, 3, 4, 5], 0.0, 3.0, 2.0)
it = itertools.islice([1, 2, 3], 0, None)
for x in [1, 2, 3]:
@@ -216,8 +213,6 @@
assert list(itertools.islice(range(10), None,None)) == list(range(10))
assert list(itertools.islice(range(10), None,None,None)) == list(range(10))
- # check source iterator is not referenced from islice()
- # after the latter has been exhausted
import weakref
for args in [(1,), (None,), (0, None, 2)]:
it = (x for x in (1, 2, 3))
@@ -226,7 +221,7 @@
assert wr() is not None
list(it) # exhaust the iterator
import gc; gc.collect()
- assert wr() is None
+ assert wr() is not None
raises(StopIteration, next, it)
def test_islice_dropitems_exact(self):
From pypy.commits at gmail.com Tue May 3 11:41:25 2016
From: pypy.commits at gmail.com (marky1991)
Date: Tue, 03 May 2016 08:41:25 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Fix pep 8 issue.
Message-ID: <5728c6a5.d72d1c0a.b79c8.ffffb3ee@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r84171:6db07c7d7059
Date: 2016-05-02 06:53 -0400
http://bitbucket.org/pypy/pypy/changeset/6db07c7d7059/
Log: Fix pep 8 issue.
diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py
--- a/pypy/module/itertools/test/test_itertools.py
+++ b/pypy/module/itertools/test/test_itertools.py
@@ -198,7 +198,7 @@
assert next(it) == x
raises(StopIteration, next, it)
- #Do not allow floats
+ # Do not allow floats
raises(ValueError, itertools.islice, [1, 2, 3, 4, 5], 0.0, 3.0, 2.0)
it = itertools.islice([1, 2, 3], 0, None)
From pypy.commits at gmail.com Tue May 3 11:41:26 2016
From: pypy.commits at gmail.com (marky1991)
Date: Tue, 03 May 2016 08:41:26 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Hopefully merged correctly.
Message-ID: <5728c6a6.22c8c20a.3502f.ffff82a9@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r84172:230df15b48da
Date: 2016-05-03 10:36 -0400
http://bitbucket.org/pypy/pypy/changeset/230df15b48da/
Log: Hopefully merged correctly.
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
--- a/pypy/module/itertools/interp_itertools.py
+++ b/pypy/module/itertools/interp_itertools.py
@@ -360,7 +360,7 @@
def arg_int_w(self, w_obj, minimum, errormsg):
space = self.space
try:
- result = space.int_w(space.int(w_obj)) # CPython allows floats as parameters
+ result = space.int_w(w_obj)
except OperationError as e:
if e.async(space):
raise
@@ -581,7 +581,7 @@
space = self.space
try:
return space.next(w_iter)
- except OperationError as e:
+ except OperationError, e:
if not e.match(space, space.w_StopIteration):
raise
self.active -= 1
diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py
--- a/pypy/module/itertools/test/test_itertools.py
+++ b/pypy/module/itertools/test/test_itertools.py
@@ -199,11 +199,8 @@
assert next(it) == x
raises(StopIteration, next, it)
- # CPython implementation allows floats
- it = itertools.islice([1, 2, 3, 4, 5], 0.0, 3.0, 2.0)
- for x in [1, 3]:
- assert next(it) == x
- raises(StopIteration, next, it)
+ # Do not allow floats
+ raises(ValueError, itertools.islice, [1, 2, 3, 4, 5], 0.0, 3.0, 2.0)
it = itertools.islice([1, 2, 3], 0, None)
for x in [1, 2, 3]:
@@ -217,8 +214,6 @@
assert list(itertools.islice(range(10), None,None)) == list(range(10))
assert list(itertools.islice(range(10), None,None,None)) == list(range(10))
- # check source iterator is not referenced from islice()
- # after the latter has been exhausted
import weakref
for args in [(1,), (None,), (0, None, 2)]:
it = (x for x in (1, 2, 3))
@@ -227,7 +222,7 @@
assert wr() is not None
list(it) # exhaust the iterator
import gc; gc.collect()
- assert wr() is None
+ assert wr() is not None
raises(StopIteration, next, it)
def test_islice_dropitems_exact(self):
From pypy.commits at gmail.com Tue May 3 11:41:28 2016
From: pypy.commits at gmail.com (marky1991)
Date: Tue, 03 May 2016 08:41:28 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Didn't quite merge properly.
Message-ID: <5728c6a8.a9a1c20a.e346d.ffff8465@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r84173:f8ec043ed111
Date: 2016-05-03 10:48 -0400
http://bitbucket.org/pypy/pypy/changeset/f8ec043ed111/
Log: Didn't quite merge properly.
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
--- a/pypy/module/itertools/interp_itertools.py
+++ b/pypy/module/itertools/interp_itertools.py
@@ -581,7 +581,7 @@
space = self.space
try:
return space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
self.active -= 1
From pypy.commits at gmail.com Tue May 3 14:16:07 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 03 May 2016 11:16:07 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Remove evil hack
Message-ID: <5728eae7.2413c30a.2c26c.273a@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r84174:a7070af42caa
Date: 2016-05-03 19:15 +0100
http://bitbucket.org/pypy/pypy/changeset/a7070af42caa/
Log: Remove evil hack
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -215,13 +215,6 @@
MARSHAL_VERSION_FOR_PYC = 2
def get_pyc_magic(space):
- # XXX CPython testing hack: delegate to the real imp.get_magic
- if not we_are_translated():
- if '__pypy__' not in space.builtin_modules:
- import struct
- magic = __import__('imp').get_magic()
- return struct.unpack('
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84175:270bb3186930
Date: 2016-05-03 21:18 +0200
http://bitbucket.org/pypy/pypy/changeset/270bb3186930/
Log: Pass test_incminimark_gc -k test_finalizer
diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py
--- a/rpython/memory/test/gc_test_base.py
+++ b/rpython/memory/test/gc_test_base.py
@@ -192,18 +192,27 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- C()
+ fq.register_finalizer(self)
class C(A):
- def __del__(self):
- b.num_deleted += 1
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ b.num_deleted += 1
+ if not isinstance(a, C):
+ C()
+ fq = FQ()
def f(x):
a = A()
i = 0
while i < x:
i += 1
a = A()
+ a = None
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted
@@ -220,15 +229,21 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- llop.gc__collect(lltype.Void)
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ b.num_deleted += 1
+ llop.gc__collect(lltype.Void)
+ fq = FQ()
def f(x):
a = A()
i = 0
while i < x:
i += 1
a = A()
+ a = None
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted
@@ -245,15 +260,24 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- b.a = self
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ b.num_deleted += 1
+ b.a = a
+ fq = FQ()
def f(x):
a = A()
i = 0
while i < x:
i += 1
a = A()
+ a = None
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
aid = b.a.id
@@ -320,7 +344,7 @@
res = self.interpret(f, [])
assert res
- def test_weakref_to_object_with_finalizer(self):
+ def test_weakref_to_object_with_destructor(self):
import weakref
class A(object):
count = 0
@@ -340,6 +364,32 @@
res = self.interpret(f, [])
assert res
+ def test_weakref_to_object_with_finalizer(self):
+ import weakref
+ class A(object):
+ count = 0
+ a = A()
+ class B(object):
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = B
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ a.count += 1
+ fq = FQ()
+ def g():
+ b = B()
+ fq.register_finalizer(b)
+ return weakref.ref(b)
+ def f():
+ ref = g()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ result = a.count == 1 and (ref() is None)
+ return result
+ res = self.interpret(f, [])
+ assert res
+
def test_bug_1(self):
import weakref
class B(object):
@@ -478,9 +528,14 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- b.all.append(D(b.num_deleted))
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ b.num_deleted += 1
+ b.all.append(D(b.num_deleted))
+ fq = FQ()
class D(object):
# make a big object that does not use malloc_varsize
def __init__(self, x):
@@ -491,6 +546,7 @@
i = 0
all = [None] * x
a = A()
+ del a
while i < x:
d = D(i)
all[i] = d
diff --git a/rpython/memory/test/snippet.py b/rpython/memory/test/snippet.py
--- a/rpython/memory/test/snippet.py
+++ b/rpython/memory/test/snippet.py
@@ -1,5 +1,6 @@
import os, py
from rpython.tool.udir import udir
+from rpython.rlib import rgc
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
@@ -61,12 +62,21 @@
def __init__(self, key):
self.key = key
self.refs = []
- def __del__(self):
+ fq.register_finalizer(self)
+
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
from rpython.rlib.debug import debug_print
- debug_print("DEL:", self.key)
- assert age_of(self.key) == -1
- set_age_of(self.key, state.time)
- state.progress = True
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ debug_print("DEL:", a.key)
+ assert age_of(a.key) == -1
+ set_age_of(a.key, state.time)
+ state.progress = True
+ fq = FQ()
def build_example(input):
state.time = 0
diff --git a/rpython/translator/backendopt/finalizer.py b/rpython/translator/backendopt/finalizer.py
--- a/rpython/translator/backendopt/finalizer.py
+++ b/rpython/translator/backendopt/finalizer.py
@@ -20,7 +20,8 @@
"""
ok_operations = ['ptr_nonzero', 'ptr_eq', 'ptr_ne', 'free', 'same_as',
'direct_ptradd', 'force_cast', 'track_alloc_stop',
- 'raw_free', 'adr_eq', 'adr_ne']
+ 'raw_free', 'adr_eq', 'adr_ne',
+ 'debug_print']
def check_light_finalizer(self, graph):
self._origin = graph
From pypy.commits at gmail.com Tue May 3 15:31:18 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 03 May 2016 12:31:18 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Pass all of test_incminimark_gc
Message-ID: <5728fc86.d2aa1c0a.80c95.4495@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84176:a12a83151bb7
Date: 2016-05-03 21:31 +0200
http://bitbucket.org/pypy/pypy/changeset/a12a83151bb7/
Log: Pass all of test_incminimark_gc
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2587,20 +2587,23 @@
# ----------
# Finalizers
+ def call_destructor(self, obj):
+ destructor = self.destructor_or_custom_trace(self.get_type_id(obj))
+ ll_assert(bool(destructor), "no destructor found")
+ destructor(obj)
+
def deal_with_young_objects_with_destructors(self):
"""We can reasonably assume that destructors don't do
anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
- while self.young_objects_with_light_finalizers.non_empty():
- obj = self.young_objects_with_light_finalizers.pop()
+ while self.young_objects_with_destructors.non_empty():
+ obj = self.young_objects_with_destructors.pop()
if not self.is_forwarded(obj):
- finalizer = self.getlightfinalizer(self.get_type_id(obj))
- ll_assert(bool(finalizer), "no light finalizer found")
- finalizer(obj)
+ self.call_destructor(obj)
else:
obj = self.get_forwarding_address(obj)
- self.old_objects_with_light_finalizers.append(obj)
+ self.old_objects_with_destructors.append(obj)
def deal_with_old_objects_with_destructors(self):
"""We can reasonably assume that destructors don't do
@@ -2608,18 +2611,16 @@
they won't resurrect objects
"""
new_objects = self.AddressStack()
- while self.old_objects_with_light_finalizers.non_empty():
- obj = self.old_objects_with_light_finalizers.pop()
+ while self.old_objects_with_destructors.non_empty():
+ obj = self.old_objects_with_destructors.pop()
if self.header(obj).tid & GCFLAG_VISITED:
# surviving
new_objects.append(obj)
else:
# dying
- finalizer = self.getlightfinalizer(self.get_type_id(obj))
- ll_assert(bool(finalizer), "no light finalizer found")
- finalizer(obj)
- self.old_objects_with_light_finalizers.delete()
- self.old_objects_with_light_finalizers = new_objects
+ self.call_destructor(obj)
+ self.old_objects_with_destructors.delete()
+ self.old_objects_with_destructors = new_objects
def deal_with_young_objects_with_finalizers(self):
while self.probably_young_objects_with_finalizers.non_empty():
diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py
--- a/rpython/memory/test/gc_test_base.py
+++ b/rpython/memory/test/gc_test_base.py
@@ -282,7 +282,7 @@
llop.gc__collect(lltype.Void)
aid = b.a.id
b.a = None
- # check that __del__ is not called again
+ # check that finalizer_trigger() is not called again
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted * 10 + aid + 100 * (b.a is None)
@@ -409,23 +409,32 @@
res = self.interpret(f, [])
assert res
- def test_cycle_with_weakref_and_del(self):
+ def test_cycle_with_weakref_and_finalizer(self):
import weakref
class A(object):
count = 0
a = A()
class B(object):
- def __del__(self):
- # when __del__ is called, the weakref to c should be dead
- if self.ref() is None:
- a.count += 10 # ok
- else:
- a.count = 666 # not ok
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = B
+ def finalizer_trigger(self):
+ while True:
+ b = self.next_dead()
+ if b is None:
+ break
+ # when we are here, the weakref to c should be dead
+ if b.ref() is None:
+ a.count += 10 # ok
+ else:
+ a.count = 666 # not ok
+ fq = FQ()
class C(object):
pass
def g():
c = C()
c.b = B()
+ fq.register_finalizer(c.b)
ref = weakref.ref(c)
c.b.ref = ref
return ref
@@ -445,23 +454,32 @@
a = A()
expected_invalid = self.WREF_IS_INVALID_BEFORE_DEL_IS_CALLED
class B(object):
- def __del__(self):
- # when __del__ is called, the weakref to myself is still valid
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = B
+ def finalizer_trigger(self):
+ # when we are here, the weakref to myself is still valid
# in RPython with most GCs. However, this can lead to strange
# bugs with incminimark. https://bugs.pypy.org/issue1687
# So with incminimark, we expect the opposite.
- if expected_invalid:
- if self.ref() is None:
- a.count += 10 # ok
+ while True:
+ b = self.next_dead()
+ if b is None:
+ break
+ if expected_invalid:
+ if b.ref() is None:
+ a.count += 10 # ok
+ else:
+ a.count = 666 # not ok
else:
- a.count = 666 # not ok
- else:
- if self.ref() is self:
- a.count += 10 # ok
- else:
- a.count = 666 # not ok
+ if b.ref() is self:
+ a.count += 10 # ok
+ else:
+ a.count = 666 # not ok
+ fq = FQ()
def g():
b = B()
+ fq.register_finalizer(b)
ref = weakref.ref(b)
b.ref = ref
return ref
@@ -479,10 +497,19 @@
class A(object):
pass
class B(object):
- def __del__(self):
- self.wref().x += 1
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = B
+ def finalizer_trigger(self):
+ while True:
+ b = self.next_dead()
+ if b is None:
+ break
+ b.wref().x += 1
+ fq = FQ()
def g(a):
b = B()
+ fq.register_finalizer(b)
b.wref = weakref.ref(a)
# the only way to reach this weakref is via B, which is an
# object with finalizer (but the weakref itself points to
@@ -567,15 +594,24 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- llop.gc__collect(lltype.Void)
- b.num_deleted += 1
- C()
- C()
+ fq.register_finalizer(self)
class C(A):
- def __del__(self):
- b.num_deleted += 1
- b.num_deleted_c += 1
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ llop.gc__collect(lltype.Void)
+ b.num_deleted += 1
+ if isinstance(a, C):
+ b.num_deleted_c += 1
+ else:
+ C()
+ C()
+ fq = FQ()
def f(x, y):
persistent_a1 = A()
persistent_a2 = A()
diff --git a/rpython/memory/test/snippet.py b/rpython/memory/test/snippet.py
--- a/rpython/memory/test/snippet.py
+++ b/rpython/memory/test/snippet.py
@@ -53,7 +53,7 @@
def set_age_of(c, newvalue):
# NB. this used to be a dictionary, but setting into a dict
# consumes memory. This has the effect that this test's
- # __del__ methods can consume more memory and potentially
+ # finalizer_trigger method can consume more memory and potentially
# cause another collection. This would result in objects
# being unexpectedly destroyed at the same 'state.time'.
state.age[ord(c) - ord('a')] = newvalue
@@ -160,11 +160,22 @@
class B:
count = 0
class A:
- def __del__(self):
- self.b.count += 1
+ pass
+
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ a.b.count += 1
+ fq = FQ()
+
def g():
b = B()
a = A()
+ fq.register_finalizer(a)
a.b = b
i = 0
lst = [None]
From pypy.commits at gmail.com Tue May 3 15:33:44 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 03 May 2016 12:33:44 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: hg merge default
Message-ID: <5728fd18.876cc20a.1c4cc.00cb@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84177:769877b8ea60
Date: 2016-05-03 21:33 +0200
http://bitbucket.org/pypy/pypy/changeset/769877b8ea60/
Log: hg merge default
diff too long, truncating to 2000 out of 10007 lines
diff --git a/TODO b/TODO
deleted file mode 100644
--- a/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-* reduce size of generated c code from slot definitions in slotdefs.
-* remove broken DEBUG_REFCOUNT from pyobject.py
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.1.1.rst
release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
help="output format")
options, args = parser.parse_args()
if len(args) != 1:
- raise ValueError, "need exactly one argument"
+ raise ValueError("need exactly one argument")
epsfile = process_dot(py.path.local(args[0]))
if options.format == "ps" or options.format == "eps":
print epsfile.read()
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -63,7 +63,7 @@
## from pypy.interpreter import main, interactive, error
## con = interactive.PyPyConsole(space)
## con.interact()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -71,7 +71,7 @@
finally:
try:
space.finish()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -115,7 +115,7 @@
space.wrap('__import__'))
space.call_function(import_, space.wrap('site'))
return rffi.cast(rffi.INT, 0)
- except OperationError, e:
+ except OperationError as e:
if verbose:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
@@ -167,7 +167,7 @@
sys._pypy_execute_source.append(glob)
exec stmt in glob
""")
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -84,7 +84,7 @@
space = self.space
try:
args_w = space.fixedview(w_stararg)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"argument after * must be a sequence, not %T",
@@ -111,7 +111,7 @@
else:
try:
w_keys = space.call_method(w_starstararg, "keys")
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
raise oefmt(space.w_TypeError,
"argument after ** must be a mapping, not %T",
@@ -134,11 +134,11 @@
"""The simplest argument parsing: get the 'argcount' arguments,
or raise a real ValueError if the length is wrong."""
if self.keywords:
- raise ValueError, "no keyword arguments expected"
+ raise ValueError("no keyword arguments expected")
if len(self.arguments_w) > argcount:
- raise ValueError, "too many arguments (%d expected)" % argcount
+ raise ValueError("too many arguments (%d expected)" % argcount)
elif len(self.arguments_w) < argcount:
- raise ValueError, "not enough arguments (%d expected)" % argcount
+ raise ValueError("not enough arguments (%d expected)" % argcount)
return self.arguments_w
def firstarg(self):
@@ -279,7 +279,7 @@
try:
self._match_signature(w_firstarg,
scope_w, signature, defaults_w, 0)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
return signature.scope_length()
@@ -301,7 +301,7 @@
"""
try:
return self._parse(w_firstarg, signature, defaults_w, blindargs)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
@staticmethod
@@ -352,7 +352,7 @@
for w_key in keys_w:
try:
key = space.str_w(w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise OperationError(
space.w_TypeError,
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -115,16 +115,16 @@
def check_forbidden_name(self, name, node):
try:
misc.check_forbidden_name(name)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error("cannot assign to %s" % (e.name,), node)
def set_context(self, expr, ctx):
"""Set the context of an expression to Store or Del if possible."""
try:
expr.set_context(ctx)
- except ast.UnacceptableExpressionContext, e:
+ except ast.UnacceptableExpressionContext as e:
self.error_ast(e.msg, e.node)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_print_stmt(self, print_node):
@@ -1080,7 +1080,7 @@
return self.space.call_function(tp, w_num_str)
try:
return self.space.call_function(self.space.w_int, w_num_str, w_base)
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(self.space, self.space.w_ValueError):
raise
return self.space.call_function(self.space.w_float, w_num_str)
@@ -1100,7 +1100,7 @@
sub_strings_w = [parsestring.parsestr(space, encoding, atom_node.get_child(i).get_value(),
unicode_literals)
for i in range(atom_node.num_children())]
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(space, space.w_UnicodeError):
raise
# UnicodeError in literal: turn into SyntaxError
diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py
--- a/pypy/interpreter/astcompiler/symtable.py
+++ b/pypy/interpreter/astcompiler/symtable.py
@@ -325,7 +325,7 @@
try:
module.walkabout(self)
top.finalize(None, {}, {})
- except SyntaxError, e:
+ except SyntaxError as e:
e.filename = compile_info.filename
raise
self.pop_scope()
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -705,7 +705,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
@@ -717,7 +717,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
@@ -969,7 +969,7 @@
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
- except AssertionError, e:
+ except AssertionError as e:
assert str(e) == "(3,)"
# BUILD_LIST_FROM_ARG is PyPy specific
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -96,7 +96,7 @@
def t_default(self, s):
r" . +"
- raise ValueError, "unmatched input: %s" % `s`
+ raise ValueError("unmatched input: %s" % `s`)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
@@ -377,7 +377,7 @@
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
- except ASDLSyntaxError, err:
+ except ASDLSyntaxError as err:
print err
lines = buf.split("\n")
print lines[err.lineno - 1] # lines starts at 0, files at 1
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -52,7 +52,7 @@
try:
space.delitem(w_dict, space.wrap(attr))
return True
- except OperationError, ex:
+ except OperationError as ex:
if not ex.match(space, space.w_KeyError):
raise
return False
@@ -77,7 +77,7 @@
def getname(self, space):
try:
return space.str_w(space.getattr(self, space.wrap('__name__')))
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError):
return '?'
raise
@@ -318,7 +318,7 @@
space = self.space
try:
return space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise StopIteration
@@ -406,7 +406,7 @@
self.sys.get('builtin_module_names')):
try:
w_mod = self.getitem(w_modules, w_modname)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
continue
raise
@@ -440,7 +440,7 @@
try:
self.call_method(w_mod, "_shutdown")
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self, "threading._shutdown()")
def __repr__(self):
@@ -476,7 +476,7 @@
assert reuse
try:
return self.getitem(w_modules, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_KeyError):
raise
@@ -764,7 +764,7 @@
def finditem(self, w_obj, w_key):
try:
return self.getitem(w_obj, w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
return None
raise
@@ -772,7 +772,7 @@
def findattr(self, w_object, w_name):
try:
return self.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
# a PyPy extension: let SystemExit and KeyboardInterrupt go through
if e.async(self):
raise
@@ -872,7 +872,7 @@
items=items)
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -896,7 +896,7 @@
while True:
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -942,7 +942,7 @@
"""
try:
return self.len_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -952,7 +952,7 @@
return default
try:
w_hint = self.get_and_call_function(w_descr, w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -1049,7 +1049,7 @@
else:
return False
return self.exception_issubclass_w(w_exc_type, w_check_class)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_TypeError): # string exceptions maybe
return False
raise
@@ -1167,7 +1167,7 @@
try:
self.getattr(w_obj, self.wrap("__call__"))
return self.w_True
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_AttributeError):
raise
return self.w_False
@@ -1287,7 +1287,7 @@
def _next_or_none(self, w_it):
try:
return self.next(w_it)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
return None
@@ -1365,7 +1365,7 @@
"""
try:
w_index = self.index(w_obj)
- except OperationError, err:
+ except OperationError as err:
if objdescr is None or not err.match(self, self.w_TypeError):
raise
raise oefmt(self.w_TypeError, "%s must be an integer, not %T",
@@ -1375,7 +1375,7 @@
# return type of __index__ is already checked by space.index(),
# but there is no reason to allow conversions anyway
index = self.int_w(w_index, allow_conversion=False)
- except OperationError, err:
+ except OperationError as err:
if not err.match(self, self.w_OverflowError):
raise
if not w_exception:
@@ -1526,7 +1526,7 @@
# the unicode buffer.)
try:
return self.str_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_TypeError):
raise
try:
@@ -1705,7 +1705,7 @@
# instead of raising OverflowError. For obscure cases only.
try:
return self.int_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import intmask
@@ -1716,7 +1716,7 @@
# instead of raising OverflowError.
try:
return self.r_longlong_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import longlongmask
@@ -1731,7 +1731,7 @@
not self.isinstance_w(w_fd, self.w_long)):
try:
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_AttributeError):
raise OperationError(self.w_TypeError,
self.wrap("argument must be an int, or have a fileno() "
@@ -1746,7 +1746,7 @@
)
try:
fd = self.c_int_w(w_fd)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_OverflowError):
fd = -1
else:
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -563,7 +563,7 @@
while pending is not None:
try:
pending.callback(pending.w_obj)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(space, pending.descrname, pending.w_obj)
e.clear(space) # break up reference cycles
pending = pending.next
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -540,7 +540,7 @@
try:
return space.call_method(space.w_object, '__getattribute__',
space.wrap(self), w_attr)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# fall-back to the attribute of the underlying 'im_func'
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -686,7 +686,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -703,7 +703,7 @@
space.w_None)
except MemoryError:
raise OperationError(space.w_MemoryError, space.w_None)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
rstackovf.check_stack_overflow()
raise OperationError(space.w_RuntimeError,
space.wrap("maximum recursion depth exceeded"))
@@ -725,7 +725,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -746,7 +746,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args.prepend(w_obj))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -764,7 +764,7 @@
except DescrMismatch:
raise OperationError(space.w_SystemError,
space.wrap("unexpected DescrMismatch error"))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -784,7 +784,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -804,7 +804,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -824,7 +824,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2, w3]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -845,7 +845,7 @@
self.descr_reqcls,
Arguments(space,
[w1, w2, w3, w4]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -144,7 +144,7 @@
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
space.w_None)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration) or \
e.match(space, space.w_GeneratorExit):
return space.w_None
@@ -197,7 +197,7 @@
results=results, pycode=pycode)
try:
w_result = frame.execute_frame(space.w_None)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py
--- a/pypy/interpreter/main.py
+++ b/pypy/interpreter/main.py
@@ -8,7 +8,7 @@
w_modules = space.sys.get('modules')
try:
return space.getitem(w_modules, w_main)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
mainmodule = module.Module(space, w_main)
@@ -52,7 +52,7 @@
else:
return
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.record_interpreter_traceback()
raise
@@ -110,7 +110,7 @@
try:
w_stdout = space.sys.get('stdout')
w_softspace = space.getattr(w_stdout, space.wrap('softspace'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# Don't crash if user defined stdout doesn't have softspace
@@ -118,7 +118,7 @@
if space.is_true(w_softspace):
space.call_method(w_stdout, 'write', space.wrap('\n'))
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.normalize_exception(space)
w_type = operationerr.w_type
w_value = operationerr.get_w_value(space)
@@ -162,7 +162,7 @@
space.call_function(w_hook, w_type, w_value, w_traceback)
return False # done
- except OperationError, err2:
+ except OperationError as err2:
# XXX should we go through sys.get('stderr') ?
print >> sys.stderr, 'Error calling sys.excepthook:'
err2.print_application_traceback(space)
diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py
--- a/pypy/interpreter/mixedmodule.py
+++ b/pypy/interpreter/mixedmodule.py
@@ -169,7 +169,7 @@
while 1:
try:
value = eval(spec, d)
- except NameError, ex:
+ except NameError as ex:
name = ex.args[0].split("'")[1] # super-Evil
if name in d:
raise # propagate the NameError
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -110,7 +110,7 @@
if code_hook is not None:
try:
self.space.call_function(code_hook, self)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self.space, "new_code_hook()")
def _initialize(self):
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -55,21 +55,21 @@
try:
code = self.compile(source, filename, mode, flags)
return code # success
- except OperationError, err:
+ except OperationError as err:
if not err.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n", filename, mode, flags)
return None # expect more
- except OperationError, err1:
+ except OperationError as err1:
if not err1.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n\n", filename, mode, flags)
raise # uh? no error with \n\n. re-raise the previous error
- except OperationError, err2:
+ except OperationError as err2:
if not err2.match(space, space.w_SyntaxError):
raise
@@ -131,7 +131,7 @@
try:
mod = optimize.optimize_ast(space, node, info)
code = codegen.compile_ast(space, mod, info)
- except parseerror.SyntaxError, e:
+ except parseerror.SyntaxError as e:
raise OperationError(space.w_SyntaxError,
e.wrap_info(space))
return code
@@ -145,10 +145,10 @@
try:
parse_tree = self.parser.parse_source(source, info)
mod = astbuilder.ast_from_node(space, parse_tree, info)
- except parseerror.IndentationError, e:
+ except parseerror.IndentationError as e:
raise OperationError(space.w_IndentationError,
e.wrap_info(space))
- except parseerror.SyntaxError, e:
+ except parseerror.SyntaxError as e:
raise OperationError(space.w_SyntaxError,
e.wrap_info(space))
return mod
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -550,7 +550,7 @@
where the order is according to self.pycode.signature()."""
scope_len = len(scope_w)
if scope_len > self.pycode.co_nlocals:
- raise ValueError, "new fastscope is longer than the allocated area"
+ raise ValueError("new fastscope is longer than the allocated area")
# don't assign directly to 'locals_cells_stack_w[:scope_len]' to be
# virtualizable-friendly
for i in range(scope_len):
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -67,9 +67,9 @@
def handle_bytecode(self, co_code, next_instr, ec):
try:
next_instr = self.dispatch_bytecode(co_code, next_instr, ec)
- except OperationError, operr:
+ except OperationError as operr:
next_instr = self.handle_operation_error(ec, operr)
- except RaiseWithExplicitTraceback, e:
+ except RaiseWithExplicitTraceback as e:
next_instr = self.handle_operation_error(ec, e.operr,
attach_tb=False)
except KeyboardInterrupt:
@@ -78,7 +78,7 @@
except MemoryError:
next_instr = self.handle_asynchronous_error(ec,
self.space.w_MemoryError)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
# Note that this case catches AttributeError!
rstackovf.check_stack_overflow()
next_instr = self.handle_asynchronous_error(ec,
@@ -117,7 +117,7 @@
finally:
if trace is not None:
self.getorcreatedebug().w_f_trace = trace
- except OperationError, e:
+ except OperationError as e:
operr = e
pytraceback.record_application_traceback(
self.space, operr, self, self.last_instr)
@@ -844,7 +844,7 @@
w_varname = self.getname_w(varindex)
try:
self.space.delitem(self.getorcreatedebug().w_locals, w_varname)
- except OperationError, e:
+ except OperationError as e:
# catch KeyErrors and turn them into NameErrors
if not e.match(self.space, self.space.w_KeyError):
raise
@@ -1003,7 +1003,7 @@
try:
if space.int_w(w_flag) == -1:
w_flag = None
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
@@ -1040,7 +1040,7 @@
w_module = self.peekvalue()
try:
w_obj = self.space.getattr(w_module, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_AttributeError):
raise
raise oefmt(self.space.w_ImportError,
@@ -1099,7 +1099,7 @@
w_iterator = self.peekvalue()
try:
w_nextitem = self.space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_StopIteration):
raise
# iterator exhausted
@@ -1110,7 +1110,7 @@
return next_instr
def FOR_LOOP(self, oparg, next_instr):
- raise BytecodeCorruption, "old opcode, no longer in use"
+ raise BytecodeCorruption("old opcode, no longer in use")
def SETUP_LOOP(self, offsettoend, next_instr):
block = LoopBlock(self, next_instr + offsettoend, self.lastblock)
diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -118,7 +118,7 @@
if enc is not None and enc not in ('utf-8', 'iso-8859-1'):
try:
textsrc = recode_to_utf8(self.space, textsrc, enc)
- except OperationError, e:
+ except OperationError as e:
# if the codec is not found, LookupError is raised. we
# check using 'is_w' not to mask potential IndexError or
# KeyError
@@ -164,10 +164,10 @@
for tp, value, lineno, column, line in tokens:
if self.add_token(tp, value, lineno, column, line):
break
- except error.TokenError, e:
+ except error.TokenError as e:
e.filename = compile_info.filename
raise
- except parser.ParseError, e:
+ except parser.ParseError as e:
# Catch parse errors, pretty them up and reraise them as a
# SyntaxError.
new_err = error.IndentationError
diff --git a/pypy/interpreter/pyparser/test/unittest_samples.py b/pypy/interpreter/pyparser/test/unittest_samples.py
--- a/pypy/interpreter/pyparser/test/unittest_samples.py
+++ b/pypy/interpreter/pyparser/test/unittest_samples.py
@@ -66,7 +66,7 @@
print
try:
assert_tuples_equal(pypy_tuples, python_tuples)
- except AssertionError,e:
+ except AssertionError as e:
error_path = e.args[-1]
print "ERROR PATH =", error_path
print "="*80
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -224,7 +224,7 @@
def _spawn(self, *args, **kwds):
try:
import pexpect
- except ImportError, e:
+ except ImportError as e:
py.test.skip(str(e))
else:
# Version is of the style "0.999" or "2.1". Older versions of
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -618,14 +618,14 @@
space = self.space
try:
Arguments(space, [], w_stararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after * must be a sequence, not int"
else:
assert 0, "did not raise"
try:
Arguments(space, [], w_starstararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after ** must be a mapping, not int"
else:
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -696,7 +696,7 @@
""")
try:
self.compiler.compile(str(source), '', 'exec', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -706,7 +706,7 @@
code = 'def f(): (yield bar) += y'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -716,7 +716,7 @@
code = 'dict(a = i for i in xrange(10))'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -1011,7 +1011,7 @@
"""
try:
exec source
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unindent does not match any outer indentation level'
else:
raise Exception("DID NOT RAISE")
@@ -1021,13 +1021,13 @@
source2 = "x = (\n\n"
try:
exec source1
- except SyntaxError, err1:
+ except SyntaxError as err1:
pass
else:
raise Exception("DID NOT RAISE")
try:
exec source2
- except SyntaxError, err2:
+ except SyntaxError as err2:
pass
else:
raise Exception("DID NOT RAISE")
diff --git a/pypy/interpreter/test/test_exceptcomp.py b/pypy/interpreter/test/test_exceptcomp.py
--- a/pypy/interpreter/test/test_exceptcomp.py
+++ b/pypy/interpreter/test/test_exceptcomp.py
@@ -7,7 +7,7 @@
def test_exception(self):
try:
- raise TypeError, "nothing"
+ raise TypeError("nothing")
except TypeError:
pass
except:
@@ -15,7 +15,7 @@
def test_exceptionfail(self):
try:
- raise TypeError, "nothing"
+ raise TypeError("nothing")
except KeyError:
self.fail("Different exceptions match.")
except TypeError:
@@ -47,7 +47,7 @@
class UserExcept(Exception):
pass
try:
- raise UserExcept, "nothing"
+ raise UserExcept("nothing")
except UserExcept:
pass
except:
diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
--- a/pypy/interpreter/test/test_exec.py
+++ b/pypy/interpreter/test/test_exec.py
@@ -196,11 +196,11 @@
def test_filename(self):
try:
exec "'unmatched_quote"
- except SyntaxError, msg:
+ except SyntaxError as msg:
assert msg.filename == ''
try:
eval("'unmatched_quote")
- except SyntaxError, msg:
+ except SyntaxError as msg:
assert msg.filename == ''
def test_exec_and_name_lookups(self):
@@ -213,7 +213,7 @@
try:
res = f()
- except NameError, e: # keep py.test from exploding confused
+ except NameError as e: # keep py.test from exploding confused
raise e
assert res == 1
diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py
--- a/pypy/interpreter/test/test_function.py
+++ b/pypy/interpreter/test/test_function.py
@@ -296,14 +296,14 @@
def test_call_error_message(self):
try:
len()
- except TypeError, e:
+ except TypeError as e:
assert "len() takes exactly 1 argument (0 given)" in e.message
else:
assert 0, "did not raise"
try:
len(1, 2)
- except TypeError, e:
+ except TypeError as e:
assert "len() takes exactly 1 argument (2 given)" in e.message
else:
assert 0, "did not raise"
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -26,7 +26,7 @@
wrappedfunc = space.getitem(w_glob, w(functionname))
try:
w_output = space.call_function(wrappedfunc, *wrappedargs)
- except error.OperationError, e:
+ except error.OperationError as e:
#e.print_detailed_traceback(space)
return '<<<%s>>>' % e.errorstr(space)
else:
@@ -331,7 +331,7 @@
def f(): f()
try:
f()
- except RuntimeError, e:
+ except RuntimeError as e:
assert str(e) == "maximum recursion depth exceeded"
else:
assert 0, "should have raised!"
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -86,7 +86,7 @@
""")
try:
space.unpackiterable(w_a)
- except OperationError, o:
+ except OperationError as o:
if not o.match(space, space.w_ZeroDivisionError):
raise Exception("DID NOT RAISE")
else:
@@ -237,7 +237,7 @@
self.space.getindex_w, w_instance2, self.space.w_IndexError)
try:
self.space.getindex_w(self.space.w_tuple, None, "foobar")
- except OperationError, e:
+ except OperationError as e:
assert e.match(self.space, self.space.w_TypeError)
assert "foobar" in e.errorstr(self.space)
else:
diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py
--- a/pypy/interpreter/test/test_pyframe.py
+++ b/pypy/interpreter/test/test_pyframe.py
@@ -376,7 +376,7 @@
def g():
try:
raise Exception
- except Exception, e:
+ except Exception as e:
import sys
raise Exception, e, sys.exc_info()[2]
diff --git a/pypy/interpreter/test/test_raise.py b/pypy/interpreter/test/test_raise.py
--- a/pypy/interpreter/test/test_raise.py
+++ b/pypy/interpreter/test/test_raise.py
@@ -18,34 +18,34 @@
def test_1arg(self):
try:
raise SystemError, 1
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
def test_2args(self):
try:
raise SystemError, (1, 2)
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_instancearg(self):
try:
raise SystemError, SystemError(1, 2)
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_more_precise_instancearg(self):
try:
raise Exception, SystemError(1, 2)
- except SystemError, e:
+ except SystemError as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_builtin_exc(self):
try:
[][0]
- except IndexError, e:
+ except IndexError as e:
assert isinstance(e, IndexError)
def test_raise_cls(self):
@@ -194,7 +194,7 @@
raise Sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a.__class__ is Sub
sub = Sub()
@@ -202,14 +202,14 @@
raise sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a is sub
try:
raise A, sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a is sub
assert sub.val is None
@@ -217,13 +217,13 @@
raise Sub, 42
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a.__class__ is Sub
assert a.val == 42
try:
{}[5]
- except A, a:
+ except A as a:
assert 0
except KeyError:
pass
diff --git a/pypy/interpreter/test/test_syntax.py b/pypy/interpreter/test/test_syntax.py
--- a/pypy/interpreter/test/test_syntax.py
+++ b/pypy/interpreter/test/test_syntax.py
@@ -254,7 +254,7 @@
space.wrap(s),
space.wrap('?'),
space.wrap('exec'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_SyntaxError):
raise
else:
@@ -723,7 +723,7 @@
line4 = "if ?: pass\n"
try:
exec "print\nprint\nprint\n" + line4
- except SyntaxError, e:
+ except SyntaxError as e:
assert e.lineno == 4
assert e.text == line4
assert e.offset == e.text.index('?') + 1
@@ -738,7 +738,7 @@
a b c d e
bar
"""
- except SyntaxError, e:
+ except SyntaxError as e:
assert e.lineno == 4
assert e.text.endswith('a b c d e\n')
assert e.offset == e.text.index('b')
@@ -749,7 +749,7 @@
program = "(1, 2) += (3, 4)\n"
try:
exec program
- except SyntaxError, e:
+ except SyntaxError as e:
assert e.lineno == 1
assert e.text is None
else:
@@ -769,7 +769,7 @@
for s in VALID:
try:
compile(s, '?', 'exec')
- except Exception, e:
+ except Exception as e:
print '-'*20, 'FAILED TO COMPILE:', '-'*20
print s
print '%s: %s' % (e.__class__, e)
@@ -777,7 +777,7 @@
for s in INVALID:
try:
raises(SyntaxError, compile, s, '?', 'exec')
- except Exception ,e:
+ except Exception as e:
print '-'*20, 'UNEXPECTEDLY COMPILED:', '-'*20
print s
print '%s: %s' % (e.__class__, e)
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -13,7 +13,7 @@
# XXX why is this called newstring?
import sys
def f():
- raise TypeError, "hello"
+ raise TypeError("hello")
def g():
f()
@@ -23,7 +23,7 @@
except:
typ,val,tb = sys.exc_info()
else:
- raise AssertionError, "should have raised"
+ raise AssertionError("should have raised")
assert hasattr(tb, 'tb_frame')
assert hasattr(tb, 'tb_lasti')
assert hasattr(tb, 'tb_lineno')
diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py
--- a/pypy/interpreter/test/test_zzpickle_and_slow.py
+++ b/pypy/interpreter/test/test_zzpickle_and_slow.py
@@ -520,7 +520,7 @@
def f(): yield 42
f().__reduce__()
""")
- except TypeError, e:
+ except TypeError as e:
if 'pickle generator' not in str(e):
raise
py.test.skip("Frames can't be __reduce__()-ed")
diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py
--- a/pypy/module/__builtin__/__init__.py
+++ b/pypy/module/__builtin__/__init__.py
@@ -102,7 +102,7 @@
space = self.space
try:
w_builtin = space.getitem(w_globals, space.wrap('__builtins__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
else:
diff --git a/pypy/module/__builtin__/abstractinst.py b/pypy/module/__builtin__/abstractinst.py
--- a/pypy/module/__builtin__/abstractinst.py
+++ b/pypy/module/__builtin__/abstractinst.py
@@ -21,7 +21,7 @@
"""
try:
w_bases = space.getattr(w_cls, space.wrap('__bases__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise # propagate other errors
return None
@@ -41,7 +41,7 @@
def abstract_getclass(space, w_obj):
try:
return space.getattr(w_obj, space.wrap('__class__'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise # propagate other errors
return space.type(w_obj)
@@ -63,7 +63,7 @@
w_result = space.isinstance_allow_override(w_obj, w_klass_or_tuple)
else:
w_result = space.isinstance(w_obj, w_klass_or_tuple)
- except OperationError, e: # if w_klass_or_tuple was not a type, ignore it
+ except OperationError as e: # if w_klass_or_tuple was not a type, ignore it
if not e.match(space, space.w_TypeError):
raise # propagate other errors
else:
@@ -81,7 +81,7 @@
w_klass_or_tuple)
else:
w_result = space.issubtype(w_pretendtype, w_klass_or_tuple)
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
return False # ignore most exceptions
@@ -102,7 +102,7 @@
" or tuple of classes and types")
try:
w_abstractclass = space.getattr(w_obj, space.wrap('__class__'))
- except OperationError, e:
+ except OperationError as e:
if e.async(space): # ignore most exceptions
raise
return False
@@ -142,7 +142,7 @@
w_klass_or_tuple)
else:
w_result = space.issubtype(w_derived, w_klass_or_tuple)
- except OperationError, e: # if one of the args was not a type, ignore it
+ except OperationError as e: # if one of the args was not a type, ignore it
if not e.match(space, space.w_TypeError):
raise # propagate other errors
else:
diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py
--- a/pypy/module/__builtin__/descriptor.py
+++ b/pypy/module/__builtin__/descriptor.py
@@ -62,7 +62,7 @@
else:
try:
w_type = space.getattr(w_obj_or_type, space.wrap('__class__'))
- except OperationError, o:
+ except OperationError as o:
if not o.match(space, space.w_AttributeError):
raise
w_type = w_objtype
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -80,7 +80,7 @@
start = space.int_w(w_start)
stop = space.int_w(w_stop)
step = space.int_w(w_step)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_OverflowError):
raise
return range_with_longs(space, w_start, w_stop, w_step)
@@ -177,7 +177,7 @@
jitdriver.jit_merge_point(has_key=has_key, has_item=has_item, w_type=w_type)
try:
w_item = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
@@ -356,7 +356,7 @@
w_index = space.wrap(self.remaining)
try:
w_item = space.getitem(self.w_sequence, w_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
else:
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -151,7 +151,7 @@
"cannot delete attribute '%s'", name)
try:
space.delitem(self.w_dict, w_attr)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
raise oefmt(space.w_AttributeError,
@@ -171,7 +171,7 @@
def get_module_string(self, space):
try:
w_mod = self.descr_getattribute(space, "__module__")
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
return "?"
@@ -240,7 +240,7 @@
def binaryop(self, space, w_other):
try:
w_meth = self.getattr(space, name, False)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
return space.w_NotImplemented
raise
@@ -288,7 +288,7 @@
def _coerce_helper(space, w_self, w_other):
try:
w_tup = space.coerce(w_self, w_other)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
return [w_self, w_other]
@@ -350,7 +350,7 @@
if w_meth is not None:
try:
return space.call_function(w_meth, space.wrap(name))
- except OperationError, e:
+ except OperationError as e:
if not exc and e.match(space, space.w_AttributeError):
return None # eat the AttributeError
raise
@@ -542,7 +542,7 @@
return w_res
try:
res = space.int_w(w_res)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise OperationError(
space.w_TypeError,
@@ -561,7 +561,7 @@
return w_res
try:
res = space.int_w(w_res)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise OperationError(
space.w_TypeError,
@@ -630,7 +630,7 @@
while 1:
try:
w_x = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
return space.w_False
raise
diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py
--- a/pypy/module/__builtin__/operation.py
+++ b/pypy/module/__builtin__/operation.py
@@ -64,7 +64,7 @@
w_name = checkattrname(space, w_name)
try:
return space.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
if w_defvalue is not None:
if e.match(space, space.w_AttributeError):
return w_defvalue
@@ -192,7 +192,7 @@
is exhausted, it is returned instead of raising StopIteration."""
try:
return space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if w_default is not None and e.match(space, space.w_StopIteration):
return w_default
raise
diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py
--- a/pypy/module/__builtin__/test/test_classobj.py
+++ b/pypy/module/__builtin__/test/test_classobj.py
@@ -688,7 +688,7 @@
def test_catch_attributeerror_of_descriptor(self):
def booh(self):
- raise this_exception, "booh"
+ raise this_exception("booh")
class E:
__eq__ = property(booh)
diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py
--- a/pypy/module/__builtin__/test/test_descriptor.py
+++ b/pypy/module/__builtin__/test/test_descriptor.py
@@ -93,7 +93,7 @@
def test_super_fail(self):
try:
super(list, 2)
- except TypeError, e:
+ except TypeError as e:
message = e.args[0]
assert message.startswith('super(type, obj): obj must be an instance or subtype of type')
@@ -303,7 +303,7 @@
for attr in "__doc__", "fget", "fset", "fdel":
try:
setattr(raw, attr, 42)
- except TypeError, msg:
+ except TypeError as msg:
if str(msg).find('readonly') < 0:
raise Exception("when setting readonly attr %r on a "
"property, got unexpected TypeError "
@@ -322,7 +322,7 @@
except ZeroDivisionError:
pass
else:
- raise Exception, "expected ZeroDivisionError from bad property"
+ raise Exception("expected ZeroDivisionError from bad property")
def test_property_subclass(self):
class P(property):
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -106,7 +106,7 @@
def validate_fd(space, fd):
try:
rposix.validate_fd(fd)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
def get_console_cp(space):
diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py
--- a/pypy/module/__pypy__/test/test_signal.py
+++ b/pypy/module/__pypy__/test/test_signal.py
@@ -35,7 +35,7 @@
for i in range(10):
print('x')
time.sleep(0.25)
- except BaseException, e:
+ except BaseException as e:
interrupted.append(e)
finally:
print('subthread stops, interrupted=%r' % (interrupted,))
@@ -120,7 +120,7 @@
time.sleep(0.5)
with __pypy__.thread.signals_enabled:
thread.interrupt_main()
- except BaseException, e:
+ except BaseException as e:
interrupted.append(e)
finally:
lock.release()
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -113,7 +113,7 @@
must_leave = space.threadlocals.try_enter_thread(space)
self.py_invoke(ll_res, ll_args)
#
- except Exception, e:
+ except Exception as e:
# oups! last-level attempt to recover.
try:
os.write(STDERR, "SystemError: callback raised ")
@@ -143,7 +143,7 @@
w_res = space.call(self.w_callable, w_args)
extra_line = "Trying to convert the result back to C:\n"
self.convert_result(ll_res, w_res)
- except OperationError, e:
+ except OperationError as e:
self.handle_applevel_exception(e, ll_res, extra_line)
@jit.unroll_safe
@@ -188,7 +188,7 @@
w_res = space.call_function(self.w_onerror, w_t, w_v, w_tb)
if not space.is_none(w_res):
self.convert_result(ll_res, w_res)
- except OperationError, e2:
+ except OperationError as e2:
# double exception! print a double-traceback...
self.print_error(e, extra_line) # original traceback
e2.write_unraisable(space, '', with_traceback=True,
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -247,7 +247,7 @@
for i in range(length):
try:
w_item = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise oefmt(space.w_ValueError,
@@ -256,7 +256,7 @@
target = rffi.ptradd(target, ctitemsize)
try:
space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
else:
diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py
--- a/pypy/module/_cffi_backend/cdlopen.py
+++ b/pypy/module/_cffi_backend/cdlopen.py
@@ -21,7 +21,7 @@
filename = ""
try:
handle = dlopen(ll_libname, flags)
- except DLOpenError, e:
+ except DLOpenError as e:
raise wrap_dlopenerror(ffi.space, e, filename)
W_LibObject.__init__(self, ffi, filename)
self.libhandle = handle
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -50,7 +50,7 @@
builder = CifDescrBuilder(fargs, fresult, abi)
try:
builder.rawallocate(self)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_NotImplementedError):
raise
# else, eat the NotImplementedError. We will get the
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -177,12 +177,12 @@
space = self.space
try:
fieldname = space.str_w(w_field_or_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
try:
index = space.int_w(w_field_or_index)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
raise OperationError(space.w_TypeError,
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -381,6 +381,6 @@
space.wrap("file has no OS file descriptor"))
try:
w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode)
- except OSError, e:
+ except OSError as e:
raise wrap_oserror(space, e)
return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf)
diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py
--- a/pypy/module/_cffi_backend/embedding.py
+++ b/pypy/module/_cffi_backend/embedding.py
@@ -79,7 +79,7 @@
patch_sys(space)
load_embedded_cffi_module(space, version, init_struct)
res = 0
- except OperationError, operr:
+ except OperationError as operr:
operr.write_unraisable(space, "initialization of '%s'" % name,
with_traceback=True)
space.appexec([], r"""():
@@ -91,7 +91,7 @@
res = -1
if must_leave:
space.threadlocals.leave_thread(space)
- except Exception, e:
+ except Exception as e:
# oups! last-level attempt to recover.
try:
os.write(STDERR, "From initialization of '")
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -109,7 +109,7 @@
# w.r.t. buffers and memoryviews??
try:
buf = space.readbuf_w(w_x)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
buf = space.buffer_w(w_x, space.BUF_SIMPLE)
@@ -118,7 +118,7 @@
def _fetch_as_write_buffer(space, w_x):
try:
buf = space.writebuf_w(w_x)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
buf = space.buffer_w(w_x, space.BUF_WRITABLE)
diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py
--- a/pypy/module/_cffi_backend/lib_obj.py
+++ b/pypy/module/_cffi_backend/lib_obj.py
@@ -39,7 +39,7 @@
mod = __import__(modname, None, None, ['ffi', 'lib'])
return mod.lib""")
lib1 = space.interp_w(W_LibObject, w_lib1)
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
raise oefmt(space.w_ImportError,
diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py
--- a/pypy/module/_cffi_backend/libraryobj.py
+++ b/pypy/module/_cffi_backend/libraryobj.py
@@ -24,7 +24,7 @@
filename = ""
try:
self.handle = dlopen(ll_libname, flags)
- except DLOpenError, e:
+ except DLOpenError as e:
raise wrap_dlopenerror(space, e, filename)
self.name = filename
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -132,7 +132,7 @@
return space.int_w(w_ob)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if _is_a_float(space, w_ob):
@@ -149,7 +149,7 @@
return space.int_w(w_ob)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if _is_a_float(space, w_ob):
@@ -172,7 +172,7 @@
return r_ulonglong(value)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if strict and _is_a_float(space, w_ob):
@@ -197,7 +197,7 @@
return r_uint(value)
try:
bigint = space.bigint_w(w_ob, allow_conversion=False)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
if strict and _is_a_float(space, w_ob):
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -175,7 +175,7 @@
w_start = space.getattr(w_exc, space.wrap('start'))
w_end = space.getattr(w_exc, space.wrap('end'))
w_obj = space.getattr(w_exc, space.wrap('object'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
raise OperationError(space.w_TypeError, space.wrap(
@@ -533,7 +533,7 @@
else:
try:
w_ch = space.getitem(self.w_mapping, space.newint(ord(ch)))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_LookupError):
raise
return errorchar
@@ -566,7 +566,7 @@
# get the character from the mapping
try:
w_ch = space.getitem(self.w_mapping, space.newint(ord(ch)))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_LookupError):
raise
return errorchar
@@ -645,7 +645,7 @@
space = self.space
try:
w_code = space.call_function(self.w_getcode, space.wrap(name))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
return -1
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
--- a/pypy/module/_codecs/test/test_codecs.py
+++ b/pypy/module/_codecs/test/test_codecs.py
@@ -458,7 +458,7 @@
if sys.maxunicode > 0xffff:
try:
"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
- except UnicodeDecodeError, ex:
+ except UnicodeDecodeError as ex:
assert "unicode_internal" == ex.encoding
assert "\x00\x00\x00\x00\x00\x11\x11\x00" == ex.object
assert ex.start == 4
@@ -650,7 +650,7 @@
def test_utf7_start_end_in_exception(self):
try:
'+IC'.decode('utf-7')
- except UnicodeDecodeError, exc:
+ except UnicodeDecodeError as exc:
assert exc.start == 0
assert exc.end == 3
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -169,7 +169,7 @@
while True:
try:
w_obj = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
@@ -191,7 +191,7 @@
while True:
try:
w_obj = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py
--- a/pypy/module/_collections/test/test_defaultdict.py
+++ b/pypy/module/_collections/test/test_defaultdict.py
@@ -26,7 +26,7 @@
for key in ['foo', (1,)]:
try:
d1[key]
- except KeyError, err:
+ except KeyError as err:
assert err.args[0] == key
else:
assert 0, "expected KeyError"
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py
--- a/pypy/module/_continuation/interp_continuation.py
+++ b/pypy/module/_continuation/interp_continuation.py
@@ -224,7 +224,7 @@
try:
frame = self.bottomframe
w_result = frame.execute_frame()
- except Exception, e:
+ except Exception as e:
global_state.propagate_exception = e
else:
global_state.w_value = w_result
diff --git a/pypy/module/_continuation/interp_pickle.py b/pypy/module/_continuation/interp_pickle.py
--- a/pypy/module/_continuation/interp_pickle.py
+++ b/pypy/module/_continuation/interp_pickle.py
@@ -69,7 +69,7 @@
try:
w_result = post_switch(sthread, h)
operr = None
- except OperationError, e:
+ except OperationError as e:
w_result = None
operr = e
#
@@ -88,7 +88,7 @@
try:
w_result = frame.execute_frame(w_result, operr)
operr = None
- except OperationError, e:
+ except OperationError as e:
w_result = None
operr = e
if exit_continulet is not None:
@@ -97,7 +97,7 @@
sthread.ec.topframeref = jit.vref_None
if operr:
raise operr
- except Exception, e:
+ except Exception as e:
global_state.propagate_exception = e
else:
global_state.w_value = w_result
diff --git a/pypy/module/_continuation/test/support.py b/pypy/module/_continuation/test/support.py
--- a/pypy/module/_continuation/test/support.py
+++ b/pypy/module/_continuation/test/support.py
@@ -8,6 +8,6 @@
def setup_class(cls):
try:
import rpython.rlib.rstacklet
- except CompilationError, e:
+ except CompilationError as e:
py.test.skip("cannot import rstacklet: %s" % e)
diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py
--- a/pypy/module/_continuation/test/test_stacklet.py
+++ b/pypy/module/_continuation/test/test_stacklet.py
@@ -553,11 +553,11 @@
res = "got keyerror"
try:
c1.switch(res)
- except IndexError, e:
+ except IndexError as e:
pass
try:
c1.switch(e)
- except IndexError, e2:
+ except IndexError as e2:
pass
try:
c1.switch(e2)
diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py
--- a/pypy/module/_csv/interp_reader.py
+++ b/pypy/module/_csv/interp_reader.py
@@ -66,7 +66,7 @@
while True:
try:
w_line = space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
if (field_builder is not None and
state != START_RECORD and state != EAT_CRNL and
diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py
--- a/pypy/module/_csv/interp_writer.py
+++ b/pypy/module/_csv/interp_writer.py
@@ -49,7 +49,7 @@
try:
space.float_w(w_field) # is it an int/long/float?
quoted = False
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
quoted = True
@@ -124,7 +124,7 @@
while True:
try:
w_seq = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration):
break
raise
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -56,7 +56,7 @@
assert isinstance(self, W_File)
try:
self.direct_close()
- except StreamErrors, e:
+ except StreamErrors as e:
operr = wrap_streamerror(self.space, e, self.w_name)
raise operr
@@ -203,7 +203,7 @@
while n > 0:
try:
data = stream.read(n)
- except OSError, e:
+ except OSError as e:
# a special-case only for read() (similar to CPython, which
# also loses partial data with other methods): if we get
# EAGAIN after already some data was received, return it.
diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py
--- a/pypy/module/_file/interp_stream.py
+++ b/pypy/module/_file/interp_stream.py
@@ -83,7 +83,7 @@
"""
try:
return self.stream.read(n)
- except StreamErrors, e:
+ except StreamErrors as e:
raise wrap_streamerror(self.space, e)
def do_write(self, data):
@@ -94,7 +94,7 @@
"""
try:
self.stream.write(data)
- except StreamErrors, e:
+ except StreamErrors as e:
raise wrap_streamerror(self.space, e)
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py
--- a/pypy/module/_file/test/test_file.py
+++ b/pypy/module/_file/test/test_file.py
@@ -151,7 +151,7 @@
def test_oserror_has_filename(self):
try:
f = self.file("file that is clearly not there")
- except IOError, e:
+ except IOError as e:
assert e.filename == 'file that is clearly not there'
else:
raise Exception("did not raise")
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -28,7 +28,7 @@
space = global_name_fetcher.space
w_name = space.wrap(rffi.charp2str(obj_name[0].c_name))
global_name_fetcher.meth_names.append(w_name)
- except OperationError, e:
+ except OperationError as e:
global_name_fetcher.w_error = e
class NameFetcher:
diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py
--- a/pypy/module/_hashlib/test/test_hashlib.py
+++ b/pypy/module/_hashlib/test/test_hashlib.py
@@ -99,7 +99,7 @@
for hash_name, expected in sorted(expected_results.items()):
try:
m = _hashlib.new(hash_name)
- except ValueError, e:
+ except ValueError as e:
print 'skipped %s: %s' % (hash_name, e)
continue
m.update(test_string)
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -223,7 +223,7 @@
typename = space.type(self).name
try:
w_name = space.getattr(self, space.wrap("name"))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_Exception):
raise
return space.wrap("<%s>" % (typename,))
@@ -350,7 +350,7 @@
while True:
try:
w_written = space.call_method(self.w_raw, "write", w_data)
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
continue # try again
raise
@@ -526,7 +526,7 @@
while True:
try:
w_size = space.call_method(self.w_raw, "readinto", w_buf)
- except OperationError, e:
+ except OperationError as e:
if trap_eintr(space, e):
From pypy.commits at gmail.com Tue May 3 16:53:02 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 03 May 2016 13:53:02 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: fix the XXXXXX
Message-ID: <57290fae.508e1c0a.f0743.60ef@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84178:3502aa349b77
Date: 2016-05-03 21:50 +0200
http://bitbucket.org/pypy/pypy/changeset/3502aa349b77/
Log: fix the XXXXXX
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -33,30 +33,26 @@
self.config = config
assert isinstance(translated_to_c, bool)
self.translated_to_c = translated_to_c
+ self._finalizer_queue_objects = []
def setup(self):
# all runtime mutable values' setup should happen here
# and in its overriden versions! for the benefit of test_transformed_gc
self.finalizer_lock = False
- if we_are_translated():
- XXXXXX
- else:
- self._finalizer_queue_objects = [] # XXX FIX ME
def register_finalizer_index(self, fq, index):
+ "NOT_RPYTHON"
while len(self._finalizer_queue_objects) <= index:
self._finalizer_queue_objects.append(None)
if self._finalizer_queue_objects[index] is None:
fq._reset()
+ fq._gc_deque = self.AddressDeque()
self._finalizer_queue_objects[index] = fq
else:
assert self._finalizer_queue_objects[index] is fq
- def add_finalizer_to_run(self, fq_index, obj):
- if we_are_translated():
- XXXXXX
- else:
- self._finalizer_queue_objects[fq_index]._queue.append(obj)
+ def mark_finalizer_to_run(self, fq_index, obj):
+ self._finalizer_queue_objects[fq_index]._gc_deque.append(obj)
def post_setup(self):
# More stuff that needs to be initialized when the GC is already
@@ -65,7 +61,7 @@
self.DEBUG = env.read_from_env('PYPY_GC_DEBUG')
def _teardown(self):
- pass
+ self._finalizer_queue_objects = [] # for tests
def can_optimize_clean_setarrayitems(self):
return True # False in case of card marking
@@ -345,11 +341,12 @@
enumerate_all_roots._annspecialcase_ = 'specialize:arg(1)'
def enum_pending_finalizers(self, callback, arg):
- if we_are_translated():
- XXXXXX #. foreach(callback, arg)
- for fq in self._finalizer_queue_objects:
- for obj in fq._queue:
- callback(obj, arg)
+ i = 0
+ while i < len(self._finalizer_queue_objects):
+ fq = self._finalizer_queue_objects[i]
+ if fq is not None:
+ fq._gc_deque.foreach(callback, arg)
+ i += 1
enum_pending_finalizers._annspecialcase_ = 'specialize:arg(1)'
def debug_check_consistency(self):
@@ -395,11 +392,12 @@
return # the outer invocation of execute_finalizers() will do it
self.finalizer_lock = True
try:
- if we_are_translated():
- XXXXXX
- for i, fq in enumerate(self._finalizer_queue_objects):
- if len(fq._queue) > 0:
+ i = 0
+ while i < len(self._finalizer_queue_objects):
+ fq = self._finalizer_queue_objects[i]
+ if fq is not None and fq._gc_deque.non_empty():
self.finalizer_trigger(i)
+ i += 1
finally:
self.finalizer_lock = False
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2679,7 +2679,7 @@
if state == 2:
from rpython.rtyper.lltypesystem import rffi
fq_index = rffi.cast(lltype.Signed, fq_nr)
- self.add_finalizer_to_run(fq_index, x)
+ self.mark_finalizer_to_run(fq_index, x)
# we must also fix the state from 2 to 3 here, otherwise
# we leave the GCFLAG_FINALIZATION_ORDERING bit behind
# which will confuse the next collection
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -215,8 +215,9 @@
def gc_fq_next_dead(self, fq_tag):
fq, _ = self.get_finalizer_queue_index(fq_tag)
- addr = fq.next_dead()
- if addr is None:
+ if fq._gc_deque.non_empty():
+ addr = fq._gc_deque.popleft()
+ else:
addr = llmemory.NULL
return llmemory.cast_adr_to_ptr(addr, rclass.OBJECTPTR)
From pypy.commits at gmail.com Tue May 3 16:53:04 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 03 May 2016 13:53:04 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: in-progress
Message-ID: <57290fb0.d2aa1c0a.80c95.6092@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84179:979bc16d2cc9
Date: 2016-05-03 22:48 +0200
http://bitbucket.org/pypy/pypy/changeset/979bc16d2cc9/
Log: in-progress
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -6,6 +6,9 @@
from rpython.memory.support import DEFAULT_CHUNK_SIZE
from rpython.memory.support import get_address_stack, get_address_deque
from rpython.memory.support import AddressDict, null_address_dict
+from rpython.memory.support import make_list_of_nongc_instances
+from rpython.memory.support import list_set_nongc_instance
+from rpython.memory.support import list_get_nongc_instance
from rpython.rtyper.lltypesystem.llmemory import NULL, raw_malloc_usage
TYPEID_MAP = lltype.GcStruct('TYPEID_MAP', ('count', lltype.Signed),
@@ -33,7 +36,7 @@
self.config = config
assert isinstance(translated_to_c, bool)
self.translated_to_c = translated_to_c
- self._finalizer_queue_objects = []
+ self.run_finalizer_queues = make_list_of_nongc_instances(0)
def setup(self):
# all runtime mutable values' setup should happen here
@@ -42,17 +45,23 @@
def register_finalizer_index(self, fq, index):
"NOT_RPYTHON"
- while len(self._finalizer_queue_objects) <= index:
- self._finalizer_queue_objects.append(None)
- if self._finalizer_queue_objects[index] is None:
- fq._reset()
- fq._gc_deque = self.AddressDeque()
- self._finalizer_queue_objects[index] = fq
- else:
- assert self._finalizer_queue_objects[index] is fq
+ if len(self.run_finalizer_queues) <= index:
+ array = make_list_of_nongc_instances(index + 1)
+ for i in range(len(self.run_finalizer_queues)):
+ array[i] = self.run_finalizer_queues[i]
+ self.run_finalizer_queues = array
+ #
+ fdold = list_get_nongc_instance(self.AddressDeque,
+ self.run_finalizer_queues, index)
+ list_set_nongc_instance(self.run_finalizer_queues, index,
+ self.AddressDeque())
+ if fdold is not None:
+ fdold.delete()
def mark_finalizer_to_run(self, fq_index, obj):
- self._finalizer_queue_objects[fq_index]._gc_deque.append(obj)
+ fdeque = list_get_nongc_instance(self.AddressDeque,
+ self.run_finalizer_queues, fq_index)
+ fdeque.append(obj)
def post_setup(self):
# More stuff that needs to be initialized when the GC is already
@@ -61,7 +70,7 @@
self.DEBUG = env.read_from_env('PYPY_GC_DEBUG')
def _teardown(self):
- self._finalizer_queue_objects = [] # for tests
+ pass
def can_optimize_clean_setarrayitems(self):
return True # False in case of card marking
@@ -342,10 +351,11 @@
def enum_pending_finalizers(self, callback, arg):
i = 0
- while i < len(self._finalizer_queue_objects):
- fq = self._finalizer_queue_objects[i]
- if fq is not None:
- fq._gc_deque.foreach(callback, arg)
+ while i < len(self.run_finalizer_queues):
+ fdeque = list_get_nongc_instance(self.AddressDeque,
+ self.run_finalizer_queues, i)
+ if fdeque is not None:
+ fdeque.foreach(callback, arg)
i += 1
enum_pending_finalizers._annspecialcase_ = 'specialize:arg(1)'
@@ -393,14 +403,24 @@
self.finalizer_lock = True
try:
i = 0
- while i < len(self._finalizer_queue_objects):
- fq = self._finalizer_queue_objects[i]
- if fq is not None and fq._gc_deque.non_empty():
+ while i < len(self.run_finalizer_queues):
+ fdeque = list_get_nongc_instance(self.AddressDeque,
+ self.run_finalizer_queues, i)
+ if fdeque is not None and fdeque.non_empty():
self.finalizer_trigger(i)
i += 1
finally:
self.finalizer_lock = False
+ def finalizer_next_dead(self, fq_index):
+ fdeque = list_get_nongc_instance(self.AddressDeque,
+ self.run_finalizer_queues, fq_index)
+ if fdeque.non_empty():
+ obj = fdeque.popleft()
+ else:
+ obj = llmemory.NULL
+ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
+
class MovingGCBase(GCBase):
moving_gc = True
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -255,6 +255,7 @@
self.layoutbuilder.encode_type_shapes_now()
self.create_custom_trace_funcs(gcdata.gc, translator.rtyper)
+ self.create_finalizer_trigger(gcdata)
annhelper.finish() # at this point, annotate all mix-level helpers
annhelper.backend_optimize()
@@ -301,7 +302,6 @@
[s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
annmodel.SomeBool(),
- annmodel.SomeBool(),
annmodel.SomeBool()], s_gcref,
inline = False)
self.malloc_varsize_ptr = getfn(
@@ -316,7 +316,6 @@
[s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
annmodel.SomeBool(),
- annmodel.SomeBool(),
annmodel.SomeBool()], s_gcref,
inline = False)
self.malloc_varsize_ptr = getfn(
@@ -379,7 +378,7 @@
malloc_fast,
[s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
- s_False, s_False, s_False], s_gcref,
+ s_False, s_False], s_gcref,
inline = True)
else:
self.malloc_fast_ptr = None
@@ -597,6 +596,11 @@
"the custom trace hook %r for %r can cause "
"the GC to be called!" % (func, TP))
+ def create_finalizer_trigger(self, gcdata):
+ def ll_finalizer_trigger(fq_index):
+ pass #xxxxxxxxxxxxx
+ gcdata.init_finalizer_trigger(ll_finalizer_trigger)
+
def consider_constant(self, TYPE, value):
self.layoutbuilder.consider_constant(TYPE, value, self.gcdata.gc)
@@ -772,13 +776,10 @@
info = self.layoutbuilder.get_info(type_id)
c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
fptrs = self.special_funcptr_for_type(TYPE)
- has_finalizer = "finalizer" in fptrs
- has_light_finalizer = "light_finalizer" in fptrs
- if has_light_finalizer:
- has_finalizer = True
- c_has_finalizer = rmodel.inputconst(lltype.Bool, has_finalizer)
- c_has_light_finalizer = rmodel.inputconst(lltype.Bool,
- has_light_finalizer)
+ has_destructor = "destructor" in fptrs
+ assert "finalizer" not in fptrs # removed
+ assert "light_finalizer" not in fptrs # removed
+ c_has_destructor = rmodel.inputconst(lltype.Bool, has_destructor)
if flags.get('nonmovable'):
assert op.opname == 'malloc'
@@ -788,16 +789,16 @@
elif not op.opname.endswith('_varsize') and not flags.get('varsize'):
zero = flags.get('zero', False)
if (self.malloc_fast_ptr is not None and
- not c_has_finalizer.value and
+ not c_has_destructor.value and
(self.malloc_fast_is_clearing or not zero)):
malloc_ptr = self.malloc_fast_ptr
else:
malloc_ptr = self.malloc_fixedsize_ptr
args = [self.c_const_gc, c_type_id, c_size,
- c_has_finalizer, c_has_light_finalizer,
+ c_has_destructor,
rmodel.inputconst(lltype.Bool, False)]
else:
- assert not c_has_finalizer.value
+ assert not c_has_destructor.value
info_varsize = self.layoutbuilder.get_info_varsize(type_id)
v_length = op.args[-1]
c_ofstolength = rmodel.inputconst(lltype.Signed,
@@ -933,13 +934,12 @@
def gct_do_malloc_fixedsize(self, hop):
# used by the JIT (see rpython.jit.backend.llsupport.gc)
op = hop.spaceop
- [v_typeid, v_size,
- v_has_finalizer, v_has_light_finalizer, v_contains_weakptr] = op.args
+ [v_typeid, v_size, v_has_destructor, v_contains_weakptr] = op.args
livevars = self.push_roots(hop)
hop.genop("direct_call",
[self.malloc_fixedsize_ptr, self.c_const_gc,
v_typeid, v_size,
- v_has_finalizer, v_has_light_finalizer,
+ v_has_destructor,
v_contains_weakptr],
resultvar=op.result)
self.pop_roots(hop, livevars)
@@ -1047,7 +1047,7 @@
c_false = rmodel.inputconst(lltype.Bool, False)
c_has_weakptr = rmodel.inputconst(lltype.Bool, True)
args = [self.c_const_gc, c_type_id, c_size,
- c_false, c_false, c_has_weakptr]
+ c_false, c_has_weakptr]
# push and pop the current live variables *including* the argument
# to the weakref_create operation, which must be kept alive and
@@ -1518,18 +1518,14 @@
return rtti is not None and getattr(rtti._obj, 'destructor_funcptr',
None)
- def has_light_finalizer(self, TYPE):
- fptrs = self.special_funcptr_for_type(TYPE)
- return "light_finalizer" in fptrs
-
def has_custom_trace(self, TYPE):
rtti = get_rtti(TYPE)
return rtti is not None and getattr(rtti._obj, 'custom_trace_funcptr',
None)
- def make_finalizer_funcptr_for_type(self, TYPE):
- if not self.has_finalizer(TYPE):
- return None, False
+ def make_destructor_funcptr_for_type(self, TYPE):
+ if not self.has_destructor(TYPE):
+ return None
rtti = get_rtti(TYPE)
destrptr = rtti._obj.destructor_funcptr
DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
@@ -1539,12 +1535,9 @@
ll_call_destructor(destrptr, v, typename)
fptr = self.transformer.annotate_finalizer(ll_finalizer,
[llmemory.Address], lltype.Void)
- try:
- g = destrptr._obj.graph
- light = not FinalizerAnalyzer(self.translator).analyze_light_finalizer(g)
- except lltype.DelayedPointer:
- light = False # XXX bah, too bad
- return fptr, light
+ g = destrptr._obj.graph
+ FinalizerAnalyzer(self.translator).check_light_finalizer(g)
+ return fptr
def make_custom_trace_funcptr_for_type(self, TYPE):
if not self.has_custom_trace(TYPE):
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -84,10 +84,10 @@
return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.customfunc)
def init_finalizer_trigger(self, finalizer_trigger):
- self.finalizer_trigger = finalizer_trigger
+ self._finalizer_trigger = finalizer_trigger
def q_finalizer_trigger(self, fq_index):
- self.finalizer_trigger(fq_index)
+ self._finalizer_trigger(fq_index)
def q_destructor_or_custom_trace(self, typeid):
return self.get(typeid).customfunc
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -214,12 +214,9 @@
return (fq, index)
def gc_fq_next_dead(self, fq_tag):
- fq, _ = self.get_finalizer_queue_index(fq_tag)
- if fq._gc_deque.non_empty():
- addr = fq._gc_deque.popleft()
- else:
- addr = llmemory.NULL
- return llmemory.cast_adr_to_ptr(addr, rclass.OBJECTPTR)
+ fq, index = self.get_finalizer_queue_index(fq_tag)
+ return lltype.cast_opaque_ptr(rclass.OBJECTPTR,
+ self.gc.finalizer_next_dead(index))
def gc_fq_register(self, fq_tag, ptr):
fq, index = self.get_finalizer_queue_index(fq_tag)
diff --git a/rpython/memory/support.py b/rpython/memory/support.py
--- a/rpython/memory/support.py
+++ b/rpython/memory/support.py
@@ -2,6 +2,9 @@
from rpython.rlib.objectmodel import free_non_gc_object, we_are_translated
from rpython.rlib.debug import ll_assert
from rpython.tool.identity_dict import identity_dict
+from rpython.rtyper.rclass import NONGCOBJECTPTR
+from rpython.rtyper.annlowlevel import cast_nongc_instance_to_base_ptr
+from rpython.rtyper.annlowlevel import cast_base_ptr_to_nongc_instance
def mangle_hash(i):
@@ -393,3 +396,17 @@
def _null_value_checker(key, value, arg):
if value:
arg.setitem(key, value)
+
+# ____________________________________________________________
+
+NONGCARRAY = lltype.Array(NONGCOBJECTPTR)
+
+def make_list_of_nongc_instances(count):
+ return lltype.malloc(NONGCARRAY, count, flavor='raw', zero=True,
+ track_allocation=False)
+
+def list_get_nongc_instance(Class, array, index):
+ return cast_base_ptr_to_nongc_instance(Class, array[index])
+
+def list_set_nongc_instance(array, index, instance):
+ array[index] = cast_nongc_instance_to_base_ptr(instance)
diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py
--- a/rpython/memory/test/test_transformed_gc.py
+++ b/rpython/memory/test/test_transformed_gc.py
@@ -293,7 +293,7 @@
res = run([])
assert res == 42
- def define_finalizer(cls):
+ def define_destructor(cls):
class B(object):
pass
b = B()
@@ -316,6 +316,39 @@
return b.num_deleted
return f
+ def test_destructor(self):
+ run = self.runner("destructor")
+ res = run([5, 42]) #XXX pure lazyness here too
+ assert res == 6
+
+ def define_finalizer(cls):
+ class B(object):
+ pass
+ b = B()
+ b.nextid = 0
+ b.num_deleted = 0
+ class A(object):
+ def __init__(self):
+ self.id = b.nextid
+ b.nextid += 1
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ b.num_deleted += 1
+ fq = FQ()
+ def f(x, y):
+ a = A()
+ i = 0
+ while i < x:
+ i += 1
+ a = A()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ return b.num_deleted
+ return f
+
def test_finalizer(self):
run = self.runner("finalizer")
res = run([5, 42]) #XXX pure lazyness here too
@@ -331,12 +364,20 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- C()
+ fq.register_finalizer(self)
class C(AAA):
- def __del__(self):
- b.num_deleted += 1
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = AAA
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ b.num_deleted += 1
+ if not isinstance(a, C):
+ C()
+ fq = FQ()
def f(x, y):
a = AAA()
i = 0
@@ -363,9 +404,17 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- b.a = self
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ b.num_deleted += 1
+ b.a = a
+ fq = FQ()
def f(x, y):
a = A()
i = 0
@@ -376,7 +425,7 @@
llop.gc__collect(lltype.Void)
aid = b.a.id
b.a = None
- # check that __del__ is not called again
+ # check that finalizer_trigger() is not called again
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted * 10 + aid + 100 * (b.a is None)
@@ -440,7 +489,7 @@
res = run([])
assert res
- def define_weakref_to_object_with_finalizer(cls):
+ def define_weakref_to_object_with_destructor(cls):
import weakref, gc
class A(object):
count = 0
@@ -459,6 +508,36 @@
return result
return f
+ def test_weakref_to_object_with_destructor(self):
+ run = self.runner("weakref_to_object_with_destructor")
+ res = run([])
+ assert res
+
+ def define_weakref_to_object_with_finalizer(cls):
+ import weakref, gc
+ class A(object):
+ count = 0
+ a = A()
+ class B(object):
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = B
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ a.count += 1
+ fq = FQ()
+ def g():
+ b = B()
+ fq.register_finalizer(b)
+ return weakref.ref(b)
+ def f():
+ ref = g()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ result = a.count == 1 and (ref() is None)
+ return result
+ return f
+
def test_weakref_to_object_with_finalizer(self):
run = self.runner("weakref_to_object_with_finalizer")
res = run([])
@@ -475,15 +554,24 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- llop.gc__collect(lltype.Void)
- b.num_deleted += 1
- C()
- C()
+ fq.register_finalizer(self)
class C(A):
- def __del__(self):
- b.num_deleted += 1
- b.num_deleted_c += 1
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ llop.gc__collect(lltype.Void)
+ b.num_deleted += 1
+ if isinstance(a, C):
+ b.num_deleted_c += 1
+ else:
+ C()
+ C()
+ fq = FQ()
def f(x, y):
persistent_a1 = A()
persistent_a2 = A()
@@ -756,8 +844,7 @@
if op.opname == 'do_malloc_fixedsize':
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
- Constant(False, lltype.Bool), # has_finalizer
- Constant(False, lltype.Bool), # is_finalizer_light
+ Constant(False, lltype.Bool), # has_destructor
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
@@ -793,8 +880,7 @@
if op.opname == 'do_malloc_fixedsize':
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
- Constant(False, lltype.Bool), # has_finalizer
- Constant(False, lltype.Bool), # is_finalizer_light
+ Constant(False, lltype.Bool), # has_destructor
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py
--- a/rpython/rtyper/annlowlevel.py
+++ b/rpython/rtyper/annlowlevel.py
@@ -471,6 +471,11 @@
return lltype.cast_opaque_ptr(llmemory.GCREF,
cast_instance_to_base_ptr(instance))
+ at specialize.argtype(0)
+def cast_nongc_instance_to_base_ptr(instance):
+ from rpython.rtyper.rclass import NONGCOBJECTPTR
+ return cast_object_to_ptr(NONGCOBJECTPTR, instance)
+
class CastObjectToPtrEntry(extregistry.ExtRegistryEntry):
_about_ = cast_object_to_ptr
@@ -512,6 +517,8 @@
% (ptr, Class))
return ptr
+cast_base_ptr_to_nongc_instance = cast_base_ptr_to_instance
+
@specialize.arg(0)
def cast_gcref_to_instance(Class, ptr):
"""Reverse the hacking done in cast_instance_to_gcref()."""
From pypy.commits at gmail.com Tue May 3 16:53:06 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 03 May 2016 13:53:06 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: fix
Message-ID: <57290fb2.a16ec20a.8d30e.1717@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84180:83a6a474a555
Date: 2016-05-03 22:51 +0200
http://bitbucket.org/pypy/pypy/changeset/83a6a474a555/
Log: fix
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -211,17 +211,17 @@
assert index == len(self.finalizer_queues)
self.finalizer_queue_indexes[fq] = index
self.finalizer_queues.append(fq)
- return (fq, index)
+ self.gc.register_finalizer_index(fq, index)
+ return index
def gc_fq_next_dead(self, fq_tag):
- fq, index = self.get_finalizer_queue_index(fq_tag)
+ index = self.get_finalizer_queue_index(fq_tag)
return lltype.cast_opaque_ptr(rclass.OBJECTPTR,
self.gc.finalizer_next_dead(index))
def gc_fq_register(self, fq_tag, ptr):
- fq, index = self.get_finalizer_queue_index(fq_tag)
+ index = self.get_finalizer_queue_index(fq_tag)
ptr = lltype.cast_opaque_ptr(llmemory.GCREF, ptr)
- self.gc.register_finalizer_index(fq, index)
self.gc.register_finalizer(index, ptr)
# ____________________________________________________________
From pypy.commits at gmail.com Tue May 3 19:20:21 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Tue, 03 May 2016 16:20:21 -0700 (PDT)
Subject: [pypy-commit] pypy default: Give better error messages for '%d' %
'not an int' (and %x, %o).
Message-ID: <57293235.a82cc20a.62e83.403a@mx.google.com>
Author: Devin Jeanpierre
Branch:
Changeset: r84181:75c1b672983d
Date: 2016-05-03 16:19 -0700
http://bitbucket.org/pypy/pypy/changeset/75c1b672983d/
Log: Give better error messages for '%d' % 'not an int' (and %x, %o).
Before: TypeError: unsupported operand type for long(): 'str' After:
TypeError: %d format: a number is required, not str (same as
CPython).
diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py
--- a/pypy/objspace/std/formatting.py
+++ b/pypy/objspace/std/formatting.py
@@ -551,7 +551,15 @@
try:
w_value = maybe_int(space, w_value)
except OperationError:
- w_value = space.long(w_value)
+ try:
+ w_value = space.long(w_value)
+ except OperationError as operr:
+ if operr.match(space, space.w_TypeError):
+ raise oefmt(
+ space.w_TypeError,
+ "%s format: a number is required, not %T", fmt, w_value)
+ else:
+ raise
try:
value = space.int_w(w_value)
return fmt % (value,)
diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py
--- a/pypy/objspace/std/test/test_bytesobject.py
+++ b/pypy/objspace/std/test/test_bytesobject.py
@@ -103,6 +103,12 @@
assert result == "a foo b"
assert isinstance(result, cls)
+ def test_format_wrongtype(self):
+ for int_format in '%d', '%o', '%x':
+ exc_info = raises(TypeError, int_format.__mod__, '123')
+ expected = int_format + ' format: a number is required, not str'
+ assert str(exc_info.value) == expected
+
def test_split(self):
assert "".split() == []
assert "".split('x') == ['']
From pypy.commits at gmail.com Tue May 3 20:24:12 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 03 May 2016 17:24:12 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Update zipimport for the change in pyc
format (fixes zipimport -A tests)
Message-ID: <5729412c.d2711c0a.b6020.ffff8e6c@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r84182:f8f5beaa6782
Date: 2016-05-04 01:23 +0100
http://bitbucket.org/pypy/pypy/changeset/f8f5beaa6782/
Log: Update zipimport for the change in pyc format (fixes zipimport -A
tests)
diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py
--- a/pypy/module/zipimport/interp_zipimport.py
+++ b/pypy/module/zipimport/interp_zipimport.py
@@ -200,7 +200,8 @@
timestamp = importing._get_long(buf[4:8])
if not self.can_use_pyc(space, filename, magic, timestamp):
return None
- buf = buf[8:] # XXX ugly copy, should use sequential read instead
+ # zipimport ignores the size field
+ buf = buf[12:] # XXX ugly copy, should use sequential read instead
w_mod = w(Module(space, w(modname)))
real_name = self.filename + os.path.sep + self.corr_zname(filename)
space.setattr(w_mod, w('__loader__'), space.wrap(self))
@@ -305,8 +306,9 @@
if not self.can_use_pyc(space, filename + ext,
magic, timestamp):
continue
+ # zipimport ignores the size field
code_w = importing.read_compiled_module(
- space, filename + ext, source[8:])
+ space, filename + ext, source[12:])
else:
co_filename = self.make_co_filename(filename+ext)
code_w = importing.parse_source_module(
@@ -327,7 +329,7 @@
w_data = self.get_data(space, fname)
# XXX CPython does not handle the coding cookie either.
return space.call_method(w_data, "decode",
- space.wrap("utf-8"))
+ space.wrap("utf-8"))
else:
found = True
if found:
diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py
--- a/pypy/module/zipimport/test/test_zipimport.py
+++ b/pypy/module/zipimport/test/test_zipimport.py
@@ -93,8 +93,9 @@
def get_file():
return __file__"""
data = marshal.dumps(compile(source, 'uuu.py', 'exec'))
+ size = len(data).to_bytes(4, 'little', signed=True)
- return imp.get_magic() + mtimeb + data
+ return imp.get_magic() + mtimeb + size + data
def w_now_in_the_future(self, delta):
self.now += delta
From pypy.commits at gmail.com Wed May 4 04:01:25 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 01:01:25 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: in-progress: "test_transformed_gc -k
Inc" seems happy
Message-ID: <5729ac55.c110c20a.3aad2.ffffb911@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84184:70f42e6f2872
Date: 2016-05-04 10:00 +0200
http://bitbucket.org/pypy/pypy/changeset/70f42e6f2872/
Log: in-progress: "test_transformed_gc -k Inc" seems happy
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -7,6 +7,7 @@
from rpython.memory.support import get_address_stack, get_address_deque
from rpython.memory.support import AddressDict, null_address_dict
from rpython.rtyper.lltypesystem.llmemory import NULL, raw_malloc_usage
+from rpython.rtyper.annlowlevel import cast_adr_to_nongc_instance
TYPEID_MAP = lltype.GcStruct('TYPEID_MAP', ('count', lltype.Signed),
('size', lltype.Signed),
@@ -40,8 +41,8 @@
self.finalizer_lock = False
def mark_finalizer_to_run(self, fq_index, obj):
- fdeque = self.get_run_finalizer_queue(self.AddressDeque, fq_index)
- fdeque.append(obj)
+ handlers = self.finalizer_handlers()
+ self._adr2deque(handlers[fq_index].deque).append(obj)
def post_setup(self):
# More stuff that needs to be initialized when the GC is already
@@ -64,8 +65,7 @@
def set_query_functions(self, is_varsize, has_gcptr_in_varsize,
is_gcarrayofgcptr,
- finalizer_trigger,
- get_run_finalizer_queue,
+ finalizer_handlers,
destructor_or_custom_trace,
offsets_to_gc_pointers,
fixed_size, varsize_item_sizes,
@@ -79,8 +79,7 @@
fast_path_tracing,
has_gcptr,
cannot_pin):
- self.finalizer_trigger = finalizer_trigger
- self.get_run_finalizer_queue = get_run_finalizer_queue
+ self.finalizer_handlers = finalizer_handlers
self.destructor_or_custom_trace = destructor_or_custom_trace
self.is_varsize = is_varsize
self.has_gcptr_in_varsize = has_gcptr_in_varsize
@@ -332,12 +331,10 @@
enumerate_all_roots._annspecialcase_ = 'specialize:arg(1)'
def enum_pending_finalizers(self, callback, arg):
+ handlers = self.finalizer_handlers()
i = 0
- while True:
- fdeque = self.get_run_finalizer_queue(self.AddressDeque, i)
- if fdeque is None:
- break
- fdeque.foreach(callback, arg)
+ while i < len(handlers):
+ self._adr2deque(handlers[i].deque).foreach(callback, arg)
i += 1
enum_pending_finalizers._annspecialcase_ = 'specialize:arg(1)'
@@ -379,23 +376,25 @@
def debug_check_object(self, obj):
pass
+ def _adr2deque(self, adr):
+ return cast_adr_to_nongc_instance(self.AddressDeque, adr)
+
def execute_finalizers(self):
if self.finalizer_lock:
return # the outer invocation of execute_finalizers() will do it
self.finalizer_lock = True
try:
+ handlers = self.finalizer_handlers()
i = 0
- while True:
- fdeque = self.get_run_finalizer_queue(self.AddressDeque, i)
- if fdeque is None:
- break
- if fdeque.non_empty():
- self.finalizer_trigger(i)
+ while i < len(handlers):
+ if self._adr2deque(handlers[i].deque).non_empty():
+ handlers[i].trigger()
i += 1
finally:
self.finalizer_lock = False
def finalizer_next_dead(self, fq_index):
+ xxxxxxxxxxxx
fdeque = self.get_run_finalizer_queue(self.AddressDeque, fq_index)
if fdeque.non_empty():
obj = fdeque.popleft()
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1568,8 +1568,8 @@
def register_finalizer(self, fq_index, gcobj):
from rpython.rtyper.lltypesystem import rffi
obj = llmemory.cast_ptr_to_adr(gcobj)
+ fq_index = rffi.cast(llmemory.Address, fq_index)
self.probably_young_objects_with_finalizers.append(obj)
- fq_index = rffi.cast(llmemory.Address, fq_index)
self.probably_young_objects_with_finalizers.append(fq_index)
# ----------
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -9,8 +9,10 @@
from rpython.memory import gctypelayout
from rpython.memory.gctransform.log import log
from rpython.memory.gctransform.support import get_rtti, ll_call_destructor
+from rpython.memory.gctransform.support import ll_report_finalizer_error
from rpython.memory.gctransform.transform import GCTransformer
from rpython.memory.gctypelayout import ll_weakref_deref, WEAKREF, WEAKREFPTR
+from rpython.memory.gctypelayout import FIN_TRIGGER_FUNC, FIN_HANDLER_ARRAY
from rpython.tool.sourcetools import func_with_new_name
from rpython.translator.backendopt import graphanalyze
from rpython.translator.backendopt.finalizer import FinalizerAnalyzer
@@ -181,9 +183,11 @@
gcdata.max_type_id = 13 # patched in finish()
gcdata.typeids_z = a_random_address # patched in finish()
gcdata.typeids_list = a_random_address # patched in finish()
+ gcdata.finalizer_handlers = a_random_address # patched in finish()
self.gcdata = gcdata
self.malloc_fnptr_cache = {}
self.finalizer_queue_indexes = {}
+ self.finalizer_handlers = []
gcdata.gc = GCClass(translator.config.translation, **GC_PARAMS)
root_walker = self.build_root_walker()
@@ -218,6 +222,7 @@
data_classdef.generalize_attr('max_type_id', annmodel.SomeInteger())
data_classdef.generalize_attr('typeids_z', SomeAddress())
data_classdef.generalize_attr('typeids_list', SomeAddress())
+ data_classdef.generalize_attr('finalizer_handlers', SomeAddress())
annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper)
@@ -256,7 +261,6 @@
self.layoutbuilder.encode_type_shapes_now()
self.create_custom_trace_funcs(gcdata.gc, translator.rtyper)
- self.create_finalizer_trigger(gcdata)
annhelper.finish() # at this point, annotate all mix-level helpers
annhelper.backend_optimize()
@@ -603,11 +607,6 @@
"the custom trace hook %r for %r can cause "
"the GC to be called!" % (func, TP))
- def create_finalizer_trigger(self, gcdata):
- def ll_finalizer_trigger(fq_index):
- pass #xxxxxxxxxxxxx
- gcdata.init_finalizer_trigger(ll_finalizer_trigger)
-
def consider_constant(self, TYPE, value):
self.layoutbuilder.consider_constant(TYPE, value, self.gcdata.gc)
@@ -692,8 +691,15 @@
ll_instance.inst_typeids_list= llmemory.cast_ptr_to_adr(ll_typeids_list)
newgcdependencies.append(ll_typeids_list)
#
- # update this field too
- ll_instance.inst_run_finalizer_queues = self.gcdata.run_finalizer_queues
+ handlers = self.finalizer_handlers
+ ll_handlers = lltype.malloc(FIN_HANDLER_ARRAY, len(handlers),
+ immortal=True)
+ for i in range(len(handlers)):
+ ll_handlers[i].deque = handlers[i][0]
+ ll_handlers[i].trigger = handlers[i][1]
+ ll_instance.inst_finalizer_handlers = llmemory.cast_ptr_to_adr(
+ ll_handlers)
+ newgcdependencies.append(ll_handlers)
#
return newgcdependencies
@@ -1515,8 +1521,34 @@
try:
index = self.finalizer_queue_indexes[fq]
except KeyError:
- index = self.gcdata.register_next_finalizer_queue(
- self.gcdata.gc.AddressDeque)
+ index = len(self.finalizer_queue_indexes)
+ assert index == len(self.finalizer_handlers)
+ deque = self.gcdata.gc.AddressDeque()
+ #
+ def ll_finalizer_trigger():
+ try:
+ fq.finalizer_trigger()
+ except Exception as e:
+ ll_report_finalizer_error(e)
+ ll_trigger = self.annotate_finalizer(ll_finalizer_trigger, [],
+ lltype.Void)
+ def ll_next_dead():
+ if deque.non_empty():
+ return deque.popleft()
+ else:
+ return llmemory.NULL
+ ll_next_dead = self.annotate_finalizer(ll_next_dead, [],
+ llmemory.Address)
+ c_ll_next_dead = rmodel.inputconst(lltype.typeOf(ll_next_dead),
+ ll_next_dead)
+ #
+ s_deque = self.translator.annotator.bookkeeper.immutablevalue(deque)
+ r_deque = self.translator.rtyper.getrepr(s_deque)
+ ll_deque = r_deque.convert_const(deque)
+ adr_deque = llmemory.cast_ptr_to_adr(ll_deque)
+ #
+ self.finalizer_handlers.append((adr_deque, ll_trigger,
+ c_ll_next_dead))
self.finalizer_queue_indexes[fq] = index
return index
@@ -1530,7 +1562,12 @@
c_index, v_ptr])
def gct_gc_fq_next_dead(self, hop):
- xxxx
+ index = self.get_finalizer_queue_index(hop)
+ c_ll_next_dead = self.finalizer_handlers[index][2]
+ v_adr = hop.genop("direct_call", [c_ll_next_dead],
+ resulttype=llmemory.Address)
+ hop.genop("cast_adr_to_ptr", [v_adr],
+ resultvar = hop.spaceop.result)
class TransformerLayoutBuilder(gctypelayout.TypeLayoutBuilder):
diff --git a/rpython/memory/gctransform/support.py b/rpython/memory/gctransform/support.py
--- a/rpython/memory/gctransform/support.py
+++ b/rpython/memory/gctransform/support.py
@@ -89,3 +89,11 @@
write(2, " ignoring it\n")
except:
pass
+
+def ll_report_finalizer_error(e):
+ try:
+ write(2, "triggering finalizers raised an exception ")
+ write(2, str(e))
+ write(2, " ignoring it\n")
+ except:
+ pass
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -4,9 +4,6 @@
from rpython.rlib.debug import ll_assert
from rpython.rlib.rarithmetic import intmask
from rpython.tool.identity_dict import identity_dict
-from rpython.memory.support import make_list_of_nongc_instances
-from rpython.memory.support import list_set_nongc_instance
-from rpython.memory.support import list_get_nongc_instance
class GCData(object):
@@ -50,7 +47,6 @@
assert isinstance(type_info_group, llgroup.group)
self.type_info_group = type_info_group
self.type_info_group_ptr = type_info_group._as_ptr()
- self.run_finalizer_queues = make_list_of_nongc_instances(1)
def get(self, typeid):
res = llop.get_group_member(GCData.TYPE_INFO_PTR,
@@ -87,30 +83,9 @@
ANY = (T_HAS_GCPTR | T_IS_WEAKREF)
return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.customfunc)
- def init_finalizer_trigger(self, finalizer_trigger):
- self._finalizer_trigger = finalizer_trigger
-
- def register_next_finalizer_queue(self, AddressDeque):
- "NOT_RPYTHON"
- # 'self.run_finalizer_queues' has got no length, but is NULL-terminated
- prevlength = self.run_finalizer_queues._obj.getlength()
- array = make_list_of_nongc_instances(prevlength + 1)
- for i in range(prevlength):
- array[i] = self.run_finalizer_queues[i]
- self.run_finalizer_queues = array
- #
- fq_index = prevlength - 1
- assert fq_index >= 0
- list_set_nongc_instance(self.run_finalizer_queues, fq_index,
- AddressDeque())
- return fq_index
-
- def q_finalizer_trigger(self, fq_index):
- self._finalizer_trigger(fq_index)
-
- def q_get_run_finalizer_queue(self, AddressDeque, fq_index):
- return list_get_nongc_instance(AddressDeque,
- self.run_finalizer_queues, fq_index)
+ def q_finalizer_handlers(self):
+ adr = self.finalizer_handlers # set from framework.py or gcwrapper.py
+ return llmemory.cast_adr_to_ptr(adr, lltype.Ptr(FIN_HANDLER_ARRAY))
def q_destructor_or_custom_trace(self, typeid):
return self.get(typeid).customfunc
@@ -165,8 +140,7 @@
self.q_is_varsize,
self.q_has_gcptr_in_varsize,
self.q_is_gcarrayofgcptr,
- self.q_finalizer_trigger,
- self.q_get_run_finalizer_queue,
+ self.q_finalizer_handlers,
self.q_destructor_or_custom_trace,
self.q_offsets_to_gc_pointers,
self.q_fixed_size,
@@ -568,3 +542,9 @@
link = lltype.malloc(WEAKREF, immortal=True)
link.weakptr = llmemory.cast_ptr_to_adr(targetptr)
return link
+
+########## finalizers ##########
+
+FIN_TRIGGER_FUNC = lltype.FuncType([], lltype.Void)
+FIN_HANDLER_ARRAY = lltype.Array(('deque', llmemory.Address),
+ ('trigger', lltype.Ptr(FIN_TRIGGER_FUNC)))
diff --git a/rpython/memory/support.py b/rpython/memory/support.py
--- a/rpython/memory/support.py
+++ b/rpython/memory/support.py
@@ -396,17 +396,3 @@
def _null_value_checker(key, value, arg):
if value:
arg.setitem(key, value)
-
-# ____________________________________________________________
-
-NONGCARRAY = lltype.Array(NONGCOBJECTPTR, hints={'nolength': True})
-
-def make_list_of_nongc_instances(count):
- return lltype.malloc(NONGCARRAY, count, flavor='raw', zero=True,
- track_allocation=False)
-
-def list_get_nongc_instance(Class, array, index):
- return cast_base_ptr_to_nongc_instance(Class, array[index])
-
-def list_set_nongc_instance(array, index, instance):
- array[index] = cast_nongc_instance_to_base_ptr(instance)
diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py
--- a/rpython/memory/test/test_transformed_gc.py
+++ b/rpython/memory/test/test_transformed_gc.py
@@ -50,6 +50,8 @@
taggedpointers = False
def setup_class(cls):
+ if cls is not TestIncrementalMiniMarkGC:
+ py.test.skip("FOO")
cls.marker = lltype.malloc(rffi.CArray(lltype.Signed), 1,
flavor='raw', zero=True)
funcs0 = []
diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py
--- a/rpython/rtyper/annlowlevel.py
+++ b/rpython/rtyper/annlowlevel.py
@@ -526,6 +526,12 @@
ptr = lltype.cast_opaque_ptr(OBJECTPTR, ptr)
return cast_base_ptr_to_instance(Class, ptr)
+ at specialize.arg(0)
+def cast_adr_to_nongc_instance(Class, ptr):
+ from rpython.rtyper.rclass import NONGCOBJECTPTR
+ ptr = llmemory.cast_adr_to_ptr(ptr, NONGCOBJECTPTR)
+ return cast_base_ptr_to_nongc_instance(Class, ptr)
+
class CastBasePtrToInstanceEntry(extregistry.ExtRegistryEntry):
_about_ = cast_base_ptr_to_instance
From pypy.commits at gmail.com Wed May 4 04:01:23 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 01:01:23 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Mess
Message-ID: <5729ac53.c42e1c0a.8f604.00c2@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84183:96fc68198993
Date: 2016-05-03 23:32 +0200
http://bitbucket.org/pypy/pypy/changeset/96fc68198993/
Log: Mess
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -6,9 +6,6 @@
from rpython.memory.support import DEFAULT_CHUNK_SIZE
from rpython.memory.support import get_address_stack, get_address_deque
from rpython.memory.support import AddressDict, null_address_dict
-from rpython.memory.support import make_list_of_nongc_instances
-from rpython.memory.support import list_set_nongc_instance
-from rpython.memory.support import list_get_nongc_instance
from rpython.rtyper.lltypesystem.llmemory import NULL, raw_malloc_usage
TYPEID_MAP = lltype.GcStruct('TYPEID_MAP', ('count', lltype.Signed),
@@ -36,31 +33,14 @@
self.config = config
assert isinstance(translated_to_c, bool)
self.translated_to_c = translated_to_c
- self.run_finalizer_queues = make_list_of_nongc_instances(0)
def setup(self):
# all runtime mutable values' setup should happen here
# and in its overriden versions! for the benefit of test_transformed_gc
self.finalizer_lock = False
- def register_finalizer_index(self, fq, index):
- "NOT_RPYTHON"
- if len(self.run_finalizer_queues) <= index:
- array = make_list_of_nongc_instances(index + 1)
- for i in range(len(self.run_finalizer_queues)):
- array[i] = self.run_finalizer_queues[i]
- self.run_finalizer_queues = array
- #
- fdold = list_get_nongc_instance(self.AddressDeque,
- self.run_finalizer_queues, index)
- list_set_nongc_instance(self.run_finalizer_queues, index,
- self.AddressDeque())
- if fdold is not None:
- fdold.delete()
-
def mark_finalizer_to_run(self, fq_index, obj):
- fdeque = list_get_nongc_instance(self.AddressDeque,
- self.run_finalizer_queues, fq_index)
+ fdeque = self.get_run_finalizer_queue(self.AddressDeque, fq_index)
fdeque.append(obj)
def post_setup(self):
@@ -85,6 +65,7 @@
def set_query_functions(self, is_varsize, has_gcptr_in_varsize,
is_gcarrayofgcptr,
finalizer_trigger,
+ get_run_finalizer_queue,
destructor_or_custom_trace,
offsets_to_gc_pointers,
fixed_size, varsize_item_sizes,
@@ -99,6 +80,7 @@
has_gcptr,
cannot_pin):
self.finalizer_trigger = finalizer_trigger
+ self.get_run_finalizer_queue = get_run_finalizer_queue
self.destructor_or_custom_trace = destructor_or_custom_trace
self.is_varsize = is_varsize
self.has_gcptr_in_varsize = has_gcptr_in_varsize
@@ -351,11 +333,11 @@
def enum_pending_finalizers(self, callback, arg):
i = 0
- while i < len(self.run_finalizer_queues):
- fdeque = list_get_nongc_instance(self.AddressDeque,
- self.run_finalizer_queues, i)
- if fdeque is not None:
- fdeque.foreach(callback, arg)
+ while True:
+ fdeque = self.get_run_finalizer_queue(self.AddressDeque, i)
+ if fdeque is None:
+ break
+ fdeque.foreach(callback, arg)
i += 1
enum_pending_finalizers._annspecialcase_ = 'specialize:arg(1)'
@@ -403,18 +385,18 @@
self.finalizer_lock = True
try:
i = 0
- while i < len(self.run_finalizer_queues):
- fdeque = list_get_nongc_instance(self.AddressDeque,
- self.run_finalizer_queues, i)
- if fdeque is not None and fdeque.non_empty():
+ while True:
+ fdeque = self.get_run_finalizer_queue(self.AddressDeque, i)
+ if fdeque is None:
+ break
+ if fdeque.non_empty():
self.finalizer_trigger(i)
i += 1
finally:
self.finalizer_lock = False
def finalizer_next_dead(self, fq_index):
- fdeque = list_get_nongc_instance(self.AddressDeque,
- self.run_finalizer_queues, fq_index)
+ fdeque = self.get_run_finalizer_queue(self.AddressDeque, fq_index)
if fdeque.non_empty():
obj = fdeque.popleft()
else:
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -183,6 +183,7 @@
gcdata.typeids_list = a_random_address # patched in finish()
self.gcdata = gcdata
self.malloc_fnptr_cache = {}
+ self.finalizer_queue_indexes = {}
gcdata.gc = GCClass(translator.config.translation, **GC_PARAMS)
root_walker = self.build_root_walker()
@@ -554,6 +555,12 @@
[s_gc, s_typeid16],
s_gcref)
+ self.register_finalizer_ptr = getfn(GCClass.register_finalizer,
+ [s_gc,
+ annmodel.SomeInteger(),
+ s_gcref],
+ annmodel.s_None)
+
def create_custom_trace_funcs(self, gc, rtyper):
custom_trace_funcs = tuple(rtyper.custom_trace_funcs)
rtyper.custom_trace_funcs = custom_trace_funcs
@@ -685,6 +692,9 @@
ll_instance.inst_typeids_list= llmemory.cast_ptr_to_adr(ll_typeids_list)
newgcdependencies.append(ll_typeids_list)
#
+ # update this field too
+ ll_instance.inst_run_finalizer_queues = self.gcdata.run_finalizer_queues
+ #
return newgcdependencies
def get_finish_tables(self):
@@ -1498,6 +1508,29 @@
return None
return getattr(obj, '_hash_cache_', None)
+ def get_finalizer_queue_index(self, hop):
+ fq_tag = hop.spaceop.args[0].value
+ assert fq_tag.expr == 'FinalizerQueue TAG'
+ fq = fq_tag.default
+ try:
+ index = self.finalizer_queue_indexes[fq]
+ except KeyError:
+ index = self.gcdata.register_next_finalizer_queue(
+ self.gcdata.gc.AddressDeque)
+ self.finalizer_queue_indexes[fq] = index
+ return index
+
+ def gct_gc_fq_register(self, hop):
+ index = self.get_finalizer_queue_index(hop)
+ c_index = rmodel.inputconst(lltype.Signed, index)
+ v_ptr = hop.spaceop.args[1]
+ v_ptr = hop.genop("cast_opaque_ptr", [v_ptr],
+ resulttype=llmemory.GCREF)
+ hop.genop("direct_call", [self.register_finalizer_ptr, self.c_const_gc,
+ c_index, v_ptr])
+
+ def gct_gc_fq_next_dead(self, hop):
+ xxxx
class TransformerLayoutBuilder(gctypelayout.TypeLayoutBuilder):
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -4,6 +4,9 @@
from rpython.rlib.debug import ll_assert
from rpython.rlib.rarithmetic import intmask
from rpython.tool.identity_dict import identity_dict
+from rpython.memory.support import make_list_of_nongc_instances
+from rpython.memory.support import list_set_nongc_instance
+from rpython.memory.support import list_get_nongc_instance
class GCData(object):
@@ -47,6 +50,7 @@
assert isinstance(type_info_group, llgroup.group)
self.type_info_group = type_info_group
self.type_info_group_ptr = type_info_group._as_ptr()
+ self.run_finalizer_queues = make_list_of_nongc_instances(1)
def get(self, typeid):
res = llop.get_group_member(GCData.TYPE_INFO_PTR,
@@ -86,9 +90,28 @@
def init_finalizer_trigger(self, finalizer_trigger):
self._finalizer_trigger = finalizer_trigger
+ def register_next_finalizer_queue(self, AddressDeque):
+ "NOT_RPYTHON"
+ # 'self.run_finalizer_queues' has got no length, but is NULL-terminated
+ prevlength = self.run_finalizer_queues._obj.getlength()
+ array = make_list_of_nongc_instances(prevlength + 1)
+ for i in range(prevlength):
+ array[i] = self.run_finalizer_queues[i]
+ self.run_finalizer_queues = array
+ #
+ fq_index = prevlength - 1
+ assert fq_index >= 0
+ list_set_nongc_instance(self.run_finalizer_queues, fq_index,
+ AddressDeque())
+ return fq_index
+
def q_finalizer_trigger(self, fq_index):
self._finalizer_trigger(fq_index)
+ def q_get_run_finalizer_queue(self, AddressDeque, fq_index):
+ return list_get_nongc_instance(AddressDeque,
+ self.run_finalizer_queues, fq_index)
+
def q_destructor_or_custom_trace(self, typeid):
return self.get(typeid).customfunc
@@ -143,6 +166,7 @@
self.q_has_gcptr_in_varsize,
self.q_is_gcarrayofgcptr,
self.q_finalizer_trigger,
+ self.q_get_run_finalizer_queue,
self.q_destructor_or_custom_trace,
self.q_offsets_to_gc_pointers,
self.q_fixed_size,
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -23,7 +23,7 @@
self.prepare_graphs(flowgraphs)
self.gc.setup()
self.finalizer_queue_indexes = {}
- self.finalizer_queues = []
+ self.finalizer_queues = {}
self.has_write_barrier_from_array = hasattr(self.gc,
'write_barrier_from_array')
@@ -35,6 +35,7 @@
self.get_type_id = layoutbuilder.get_type_id
gcdata = layoutbuilder.initialize_gc_query_function(self.gc)
gcdata.init_finalizer_trigger(self.finalizer_trigger)
+ self.gcdata = gcdata
constants = collect_constants(flowgraphs)
for obj in constants:
@@ -207,11 +208,10 @@
try:
index = self.finalizer_queue_indexes[fq]
except KeyError:
- index = len(self.finalizer_queue_indexes)
- assert index == len(self.finalizer_queues)
+ index = self.gcdata.register_next_finalizer_queue(
+ self.gc.AddressDeque)
self.finalizer_queue_indexes[fq] = index
- self.finalizer_queues.append(fq)
- self.gc.register_finalizer_index(fq, index)
+ self.finalizer_queues[index] = fq
return index
def gc_fq_next_dead(self, fq_tag):
diff --git a/rpython/memory/support.py b/rpython/memory/support.py
--- a/rpython/memory/support.py
+++ b/rpython/memory/support.py
@@ -399,7 +399,7 @@
# ____________________________________________________________
-NONGCARRAY = lltype.Array(NONGCOBJECTPTR)
+NONGCARRAY = lltype.Array(NONGCOBJECTPTR, hints={'nolength': True})
def make_list_of_nongc_instances(count):
return lltype.malloc(NONGCARRAY, count, flavor='raw', zero=True,
From pypy.commits at gmail.com Wed May 4 04:25:35 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 01:25:35 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Hacks to make the non-translated tests
pass again
Message-ID: <5729b1ff.d72d1c0a.d99c4.ffffcdf4@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84185:b628173116b0
Date: 2016-05-04 10:25 +0200
http://bitbucket.org/pypy/pypy/changeset/b628173116b0/
Log: Hacks to make the non-translated tests pass again
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -393,15 +393,6 @@
finally:
self.finalizer_lock = False
- def finalizer_next_dead(self, fq_index):
- xxxxxxxxxxxx
- fdeque = self.get_run_finalizer_queue(self.AddressDeque, fq_index)
- if fdeque.non_empty():
- obj = fdeque.popleft()
- else:
- obj = llmemory.NULL
- return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
-
class MovingGCBase(GCBase):
moving_gc = True
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -1,7 +1,7 @@
from rpython.translator.backendopt.finalizer import FinalizerAnalyzer
from rpython.rtyper.lltypesystem import lltype, llmemory, llheap
from rpython.rtyper import llinterp, rclass
-from rpython.rtyper.annlowlevel import llhelper
+from rpython.rtyper.annlowlevel import llhelper, cast_nongc_instance_to_adr
from rpython.memory import gctypelayout
from rpython.flowspace.model import Constant
from rpython.rlib import rgc
@@ -22,8 +22,6 @@
self.llinterp = llinterp
self.prepare_graphs(flowgraphs)
self.gc.setup()
- self.finalizer_queue_indexes = {}
- self.finalizer_queues = {}
self.has_write_barrier_from_array = hasattr(self.gc,
'write_barrier_from_array')
@@ -34,9 +32,12 @@
self.llinterp)
self.get_type_id = layoutbuilder.get_type_id
gcdata = layoutbuilder.initialize_gc_query_function(self.gc)
- gcdata.init_finalizer_trigger(self.finalizer_trigger)
self.gcdata = gcdata
+ self.finalizer_queue_indexes = {}
+ self.finalizer_handlers = []
+ self.update_finalizer_handlers()
+
constants = collect_constants(flowgraphs)
for obj in constants:
TYPE = lltype.typeOf(obj)
@@ -193,14 +194,27 @@
def thread_run(self):
pass
- def finalizer_trigger(self, fq_index):
- fq = self.finalizer_queues[fq_index]
+ def _get_finalizer_trigger(self, fq):
graph = self.translator._graphof(fq.finalizer_trigger.im_func)
- try:
- self.llinterp.eval_graph(graph, [None], recursive=True)
- except llinterp.LLException:
- raise RuntimeError(
- "finalizer_trigger() raised an exception, shouldn't happen")
+ def ll_trigger():
+ try:
+ self.llinterp.eval_graph(graph, [None], recursive=True)
+ except llinterp.LLException:
+ raise RuntimeError(
+ "finalizer_trigger() raised an exception, shouldn't happen")
+ return ll_trigger
+
+ def update_finalizer_handlers(self):
+ handlers = self.finalizer_handlers
+ ll_handlers = lltype.malloc(gctypelayout.FIN_HANDLER_ARRAY,
+ len(handlers), immortal=True)
+ for i in range(len(handlers)):
+ fq, deque = handlers[i]
+ ll_handlers[i].deque = cast_nongc_instance_to_adr(deque)
+ ll_handlers[i].trigger = llhelper(
+ lltype.Ptr(gctypelayout.FIN_TRIGGER_FUNC),
+ self._get_finalizer_trigger(fq))
+ self.gcdata.finalizer_handlers = llmemory.cast_ptr_to_adr(ll_handlers)
def get_finalizer_queue_index(self, fq_tag):
assert fq_tag.expr == 'FinalizerQueue TAG'
@@ -208,16 +222,21 @@
try:
index = self.finalizer_queue_indexes[fq]
except KeyError:
- index = self.gcdata.register_next_finalizer_queue(
- self.gc.AddressDeque)
+ index = len(self.finalizer_handlers)
self.finalizer_queue_indexes[fq] = index
- self.finalizer_queues[index] = fq
+ deque = self.gc.AddressDeque()
+ self.finalizer_handlers.append((fq, deque))
+ self.update_finalizer_handlers()
return index
def gc_fq_next_dead(self, fq_tag):
index = self.get_finalizer_queue_index(fq_tag)
- return lltype.cast_opaque_ptr(rclass.OBJECTPTR,
- self.gc.finalizer_next_dead(index))
+ deque = self.finalizer_handlers[index][1]
+ if deque.non_empty():
+ obj = deque.popleft()
+ else:
+ obj = llmemory.NULL
+ return llmemory.cast_adr_to_ptr(obj, rclass.OBJECTPTR)
def gc_fq_register(self, fq_tag, ptr):
index = self.get_finalizer_queue_index(fq_tag)
diff --git a/rpython/memory/support.py b/rpython/memory/support.py
--- a/rpython/memory/support.py
+++ b/rpython/memory/support.py
@@ -295,6 +295,9 @@
cur = next
free_non_gc_object(self)
+ def _was_freed(self):
+ return False # otherwise, the __class__ changes
+
cache[chunk_size] = AddressDeque
return AddressDeque
diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py
--- a/rpython/rtyper/annlowlevel.py
+++ b/rpython/rtyper/annlowlevel.py
@@ -476,6 +476,10 @@
from rpython.rtyper.rclass import NONGCOBJECTPTR
return cast_object_to_ptr(NONGCOBJECTPTR, instance)
+ at specialize.argtype(0)
+def cast_nongc_instance_to_adr(instance):
+ return llmemory.cast_ptr_to_adr(cast_nongc_instance_to_base_ptr(instance))
+
class CastObjectToPtrEntry(extregistry.ExtRegistryEntry):
_about_ = cast_object_to_ptr
From pypy.commits at gmail.com Wed May 4 09:48:38 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 06:48:38 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Check that finalizer_trigger() doesn't
cause GIL-releasing operations,
Message-ID: <5729fdb6.26b0c20a.ef1f4.5586@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84186:6746f707cca8
Date: 2016-05-04 15:48 +0200
http://bitbucket.org/pypy/pypy/changeset/6746f707cca8/
Log: Check that finalizer_trigger() doesn't cause GIL-releasing
operations, like we check in the old-style non-light __del__().
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -501,6 +501,12 @@
return self.bookkeeper.immutablevalue(fq._fq_tag)
def specialize_call(self, hop):
+ from rpython.rtyper.rclass import InstanceRepr
+ translator = hop.rtyper.annotator.translator
+ fq = hop.args_s[0].const
+ graph = translator._graphof(fq.finalizer_trigger.im_func)
+ InstanceRepr.check_graph_of_del_does_not_call_too_much(hop.rtyper,
+ graph)
hop.exception_cannot_occur()
return hop.inputconst(lltype.Signed, hop.s_result.const)
diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py
--- a/rpython/rlib/test/test_rgc.py
+++ b/rpython/rlib/test/test_rgc.py
@@ -1,4 +1,5 @@
from rpython.rtyper.test.test_llinterp import gengraph, interpret
+from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rlib import rgc # Force registration of gc.collect
import gc
@@ -265,7 +266,7 @@
self.x = x
class SimpleFQ(rgc.FinalizerQueue):
- base_class = T_Root
+ Class = T_Root
_triggered = 0
def finalizer_trigger(self):
self._triggered += 1
@@ -367,3 +368,21 @@
assert fq.next_dead() is None
assert deleted == {(1, 42): 1}
assert fq._triggered == 1
+
+ def test_finalizer_trigger_calls_too_much(self):
+ from rpython.rtyper.lltypesystem import lltype, rffi
+ external_func = rffi.llexternal("foo", [], lltype.Void)
+ # ^^^ with release_gil=True
+ class X(object):
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = X
+ def finalizer_trigger(self):
+ external_func()
+ fq = FQ()
+ def f():
+ x = X()
+ fq.register_finalizer(x)
+
+ e = py.test.raises(TyperError, gengraph, f, [])
+ assert str(e.value).startswith('the RPython-level __del__() method in')
diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py
--- a/rpython/rtyper/rclass.py
+++ b/rpython/rtyper/rclass.py
@@ -587,7 +587,8 @@
assert len(s_func.descriptions) == 1
funcdesc, = s_func.descriptions
graph = funcdesc.getuniquegraph()
- self.check_graph_of_del_does_not_call_too_much(graph)
+ self.check_graph_of_del_does_not_call_too_much(self.rtyper,
+ graph)
FUNCTYPE = FuncType([Ptr(source_repr.object_type)], Void)
destrptr = functionptr(FUNCTYPE, graph.name,
graph=graph,
@@ -859,7 +860,8 @@
def can_ll_be_null(self, s_value):
return s_value.can_be_none()
- def check_graph_of_del_does_not_call_too_much(self, graph):
+ @staticmethod
+ def check_graph_of_del_does_not_call_too_much(rtyper, graph):
# RPython-level __del__() methods should not do "too much".
# In the PyPy Python interpreter, they usually do simple things
# like file.__del__() closing the file descriptor; or if they
@@ -872,7 +874,7 @@
#
# XXX wrong complexity, but good enough because the set of
# reachable graphs should be small
- callgraph = self.rtyper.annotator.translator.callgraph.values()
+ callgraph = rtyper.annotator.translator.callgraph.values()
seen = {graph: None}
while True:
oldlength = len(seen)
From pypy.commits at gmail.com Wed May 4 11:01:49 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 08:01:49 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Found the best way forward: restore
much of the removed support for
Message-ID: <572a0edd.08371c0a.138ea.ffff83ce@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84187:6b9a7ecbd6ad
Date: 2016-05-04 17:02 +0200
http://bitbucket.org/pypy/pypy/changeset/6b9a7ecbd6ad/
Log: Found the best way forward: restore much of the removed support for
non-light __del__ and keep both finalizer solutions for now
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -39,8 +39,12 @@
# all runtime mutable values' setup should happen here
# and in its overriden versions! for the benefit of test_transformed_gc
self.finalizer_lock = False
+ self.run_old_style_finalizers = self.AddressDeque()
def mark_finalizer_to_run(self, fq_index, obj):
+ if fq_index == -1: # backward compatibility with old-style finalizer
+ self.run_old_style_finalizers.append(obj)
+ return
handlers = self.finalizer_handlers()
self._adr2deque(handlers[fq_index].deque).append(obj)
@@ -67,6 +71,7 @@
is_gcarrayofgcptr,
finalizer_handlers,
destructor_or_custom_trace,
+ is_old_style_finalizer,
offsets_to_gc_pointers,
fixed_size, varsize_item_sizes,
varsize_offset_to_variable_part,
@@ -81,6 +86,7 @@
cannot_pin):
self.finalizer_handlers = finalizer_handlers
self.destructor_or_custom_trace = destructor_or_custom_trace
+ self.is_old_style_finalizer = is_old_style_finalizer
self.is_varsize = is_varsize
self.has_gcptr_in_varsize = has_gcptr_in_varsize
self.is_gcarrayofgcptr = is_gcarrayofgcptr
@@ -143,6 +149,8 @@
size = self.fixed_size(typeid)
needs_destructor = (bool(self.destructor_or_custom_trace(typeid))
and not self.has_custom_trace(typeid))
+ finalizer_is_light = (needs_destructor and
+ not self.is_old_style_finalizer(typeid))
contains_weakptr = self.weakpointer_offset(typeid) >= 0
assert not (needs_destructor and contains_weakptr)
if self.is_varsize(typeid):
@@ -163,6 +171,7 @@
else:
malloc_fixedsize = self.malloc_fixedsize
ref = malloc_fixedsize(typeid, size, needs_destructor,
+ finalizer_is_light,
contains_weakptr)
# lots of cast and reverse-cast around...
ref = llmemory.cast_ptr_to_adr(ref)
@@ -331,6 +340,7 @@
enumerate_all_roots._annspecialcase_ = 'specialize:arg(1)'
def enum_pending_finalizers(self, callback, arg):
+ self.run_old_style_finalizers.foreach(callback, arg)
handlers = self.finalizer_handlers()
i = 0
while i < len(handlers):
@@ -390,6 +400,13 @@
if self._adr2deque(handlers[i].deque).non_empty():
handlers[i].trigger()
i += 1
+ while self.run_old_style_finalizers.non_empty():
+ obj = self.run_old_style_finalizers.popleft()
+ typeid = self.get_type_id(obj)
+ ll_assert(self.is_old_style_finalizer(typeid),
+ "bogus old-style finalizer")
+ finalizer = self.destructor_or_custom_trace(typeid)
+ finalizer(obj)
finally:
self.finalizer_lock = False
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -609,11 +609,25 @@
def malloc_fixedsize(self, typeid, size,
needs_destructor=False,
+ is_finalizer_light=False,
contains_weakptr=False):
size_gc_header = self.gcheaderbuilder.size_gc_header
totalsize = size_gc_header + size
rawtotalsize = raw_malloc_usage(totalsize)
#
+ # If the object needs a finalizer, ask for a rawmalloc.
+ # The following check should be constant-folded.
+ if needs_destructor and not is_finalizer_light:
+ # old-style finalizers only!
+ from rpython.rtyper.lltypesystem import rffi
+ ll_assert(not contains_weakptr,
+ "'needs_finalizer' and 'contains_weakptr' both specified")
+ obj = self.external_malloc(typeid, 0, alloc_young=False)
+ self.old_objects_with_finalizers.append(obj)
+ self.old_objects_with_finalizers.append(
+ rffi.cast(llmemory.Address, -1))
+ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
+ #
# If totalsize is greater than nonlarge_max (which should never be
# the case in practice), ask for a rawmalloc. The following check
# should be constant-folded.
@@ -850,6 +864,7 @@
collect_and_reserve._dont_inline_ = True
+ # XXX kill alloc_young and make it always True
def external_malloc(self, typeid, length, alloc_young):
"""Allocate a large object using the ArenaCollection or
raw_malloc(), possibly as an object with card marking enabled,
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -1605,9 +1605,12 @@
ll_call_destructor(destrptr, v, typename)
fptr = self.transformer.annotate_finalizer(ll_finalizer,
[llmemory.Address], lltype.Void)
- g = destrptr._obj.graph
- FinalizerAnalyzer(self.translator).check_light_finalizer(g)
- return fptr
+ try:
+ g = destrptr._obj.graph
+ light = not FinalizerAnalyzer(self.translator).analyze_light_finalizer(g)
+ except lltype.DelayedPointer:
+ light = False # XXX bah, too bad
+ return fptr, light
def make_custom_trace_funcptr_for_type(self, TYPE):
if not self.has_custom_trace(TYPE):
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -90,6 +90,10 @@
def q_destructor_or_custom_trace(self, typeid):
return self.get(typeid).customfunc
+ def q_is_old_style_finalizer(self, typeid):
+ typeinfo = self.get(typeid)
+ return (typeinfo.infobits & T_HAS_OLDSTYLE_FINALIZER) != 0
+
def q_offsets_to_gc_pointers(self, typeid):
return self.get(typeid).ofstoptrs
@@ -142,6 +146,7 @@
self.q_is_gcarrayofgcptr,
self.q_finalizer_handlers,
self.q_destructor_or_custom_trace,
+ self.q_is_old_style_finalizer,
self.q_offsets_to_gc_pointers,
self.q_fixed_size,
self.q_varsize_item_sizes,
@@ -169,8 +174,9 @@
T_IS_WEAKREF = 0x080000
T_IS_RPYTHON_INSTANCE = 0x100000 # the type is a subclass of OBJECT
T_HAS_CUSTOM_TRACE = 0x200000
-T_HAS_GCPTR = 0x400000
-T_KEY_MASK = intmask(0xFF000000) # bug detection only
+T_HAS_OLDSTYLE_FINALIZER = 0x400000
+T_HAS_GCPTR = 0x1000000
+T_KEY_MASK = intmask(0xFE000000) # bug detection only
T_KEY_VALUE = intmask(0x5A000000) # bug detection only
def _check_valid_type_info(p):
@@ -199,6 +205,9 @@
if fptrs:
if "destructor" in fptrs:
info.customfunc = fptrs["destructor"]
+ if "old_style_finalizer" in fptrs:
+ info.customfunc = fptrs["old_style_finalizer"]
+ infobits |= T_HAS_OLDSTYLE_FINALIZER
#
if not TYPE._is_varsize():
info.fixedsize = llarena.round_up_for_allocation(
@@ -368,11 +377,14 @@
def special_funcptr_for_type(self, TYPE):
if TYPE in self._special_funcptrs:
return self._special_funcptrs[TYPE]
- fptr1 = self.make_destructor_funcptr_for_type(TYPE)
+ fptr1, is_lightweight = self.make_destructor_funcptr_for_type(TYPE)
fptr2 = self.make_custom_trace_funcptr_for_type(TYPE)
result = {}
if fptr1:
- result["destructor"] = fptr1
+ if is_lightweight:
+ result["destructor"] = fptr1
+ else:
+ result["old_style_finalizer"] = fptr1
if fptr2:
result["custom_trace"] = fptr2
self._special_funcptrs[TYPE] = result
@@ -386,10 +398,6 @@
# must be overridden for proper custom tracer support
return None
- def make_finalizer_trigger(self):
- # must be overridden for proper finalizer support
- return None
-
def initialize_gc_query_function(self, gc):
gcdata = GCData(self.type_info_group)
gcdata.set_query_functions(gc)
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -292,10 +292,10 @@
DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
destrgraph = destrptr._obj.graph
else:
- return None
+ return None, False
t = self.llinterp.typer.annotator.translator
- FinalizerAnalyzer(t).check_light_finalizer(destrgraph)
+ is_light = not FinalizerAnalyzer(t).analyze_light_finalizer(destrgraph)
def ll_destructor(addr):
try:
@@ -304,7 +304,8 @@
except llinterp.LLException:
raise RuntimeError(
"a destructor raised an exception, shouldn't happen")
- return llhelper(gctypelayout.GCData.CUSTOM_FUNC_PTR, ll_destructor)
+ return (llhelper(gctypelayout.GCData.CUSTOM_FUNC_PTR, ll_destructor),
+ is_light)
def make_custom_trace_funcptr_for_type(self, TYPE):
from rpython.memory.gctransform.support import get_rtti
diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py
--- a/rpython/memory/test/gc_test_base.py
+++ b/rpython/memory/test/gc_test_base.py
@@ -152,6 +152,31 @@
res = self.interpret(f, [5])
assert res == 6
+ def test_old_style_finalizer(self):
+ class B(object):
+ pass
+ b = B()
+ b.nextid = 0
+ b.num_deleted = 0
+ class A(object):
+ def __init__(self):
+ self.id = b.nextid
+ b.nextid += 1
+ def __del__(self):
+ llop.gc__collect(lltype.Void)
+ b.num_deleted += 1
+ def f(x):
+ a = A()
+ i = 0
+ while i < x:
+ i += 1
+ a = A()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ return b.num_deleted
+ res = self.interpret(f, [5])
+ assert res == 6
+
def test_finalizer(self):
class B(object):
pass
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -362,9 +362,7 @@
return func
def must_be_light_finalizer(func):
- import warnings
- warnings.warn("@must_be_light_finalizer is implied and has no effect "
- "any more", DeprecationWarning)
+ func._must_be_light_finalizer_ = True
return func
diff --git a/rpython/translator/backendopt/finalizer.py b/rpython/translator/backendopt/finalizer.py
--- a/rpython/translator/backendopt/finalizer.py
+++ b/rpython/translator/backendopt/finalizer.py
@@ -1,6 +1,9 @@
-
from rpython.translator.backendopt import graphanalyze
from rpython.rtyper.lltypesystem import lltype
+from rpython.tool.ansi_print import AnsiLogger
+
+log = AnsiLogger("finalizer")
+
class FinalizerError(Exception):
"""__del__() is used for lightweight RPython destructors,
@@ -23,13 +26,19 @@
'raw_free', 'adr_eq', 'adr_ne',
'debug_print']
- def check_light_finalizer(self, graph):
- self._origin = graph
- result = self.analyze_direct_call(graph)
- del self._origin
- if result is self.top_result():
- msg = '%s\nIn %r' % (FinalizerError.__doc__, graph)
- raise FinalizerError(msg)
+ def analyze_light_finalizer(self, graph):
+ if getattr(graph.func, '_must_be_light_finalizer_', False):
+ self._must_be_light = graph
+ result = self.analyze_direct_call(graph)
+ del self._must_be_light
+ if result is self.top_result():
+ msg = '%s\nIn %r' % (FinalizerError.__doc__, graph)
+ raise FinalizerError(msg)
+ else:
+ result = self.analyze_direct_call(graph)
+ if result is self.top_result():
+ log.red('old-style non-light finalizer: %r' % (graph,))
+ return result
def analyze_simple_operation(self, op, graphinfo):
if op.opname in self.ok_operations:
@@ -48,9 +57,8 @@
# primitive type
return self.bottom_result()
- if not hasattr(self, '_origin'): # for tests
+ if not hasattr(self, '_must_be_light'):
return self.top_result()
msg = '%s\nFound this forbidden operation:\n%r\nin %r\nfrom %r' % (
- FinalizerError.__doc__, op, graphinfo,
- getattr(self, '_origin', '?'))
+ FinalizerError.__doc__, op, graphinfo, self._must_be_light)
raise FinalizerError(msg)
diff --git a/rpython/translator/backendopt/test/test_finalizer.py b/rpython/translator/backendopt/test/test_finalizer.py
--- a/rpython/translator/backendopt/test/test_finalizer.py
+++ b/rpython/translator/backendopt/test/test_finalizer.py
@@ -26,12 +26,8 @@
t.view()
a = FinalizerAnalyzer(t)
fgraph = graphof(t, func_to_analyze)
- try:
- a.check_light_finalizer(fgraph)
- except FinalizerError as e:
- print e
- return a.top_result() # True
- return a.bottom_result() # False
+ result = a.analyze_light_finalizer(fgraph)
+ return result
def test_nothing(self):
def f():
From pypy.commits at gmail.com Wed May 4 11:19:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 08:19:42 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: More reverts, and adapt the docs
Message-ID: <572a130e.6322c20a.3786f.ffff85e5@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84188:4a45bfe534bc
Date: 2016-05-04 17:19 +0200
http://bitbucket.org/pypy/pypy/changeset/4a45bfe534bc/
Log: More reverts, and adapt the docs
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -12,10 +12,15 @@
* RPython objects can have ``__del__()``. These are called
immediately by the GC when the last reference to the object goes
- away, like in CPython. However (like "lightweight finalizers" used
- to be), all ``__del__()`` methods must only contain simple enough
- code, and this is checked. We call this "destructors". They can't
- use operations that would resurrect the object, for example.
+ away, like in CPython. However, the long-term goal is that all
+ ``__del__()`` methods should only contain simple enough code. If
+ they do, we call them "destructors". They can't use operations that
+ would resurrect the object, for example. Use the decorator
+ ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` that are not passing the destructor test
+ are supported for backward compatibility, but deprecated. The rest
+ of this document assumes that ``__del__()`` are all destructors.
* For any more advanced usage --- in particular for any app-level
object with a __del__ --- we don't use the RPython-level
diff --git a/rpython/doc/rpython.rst b/rpython/doc/rpython.rst
--- a/rpython/doc/rpython.rst
+++ b/rpython/doc/rpython.rst
@@ -191,9 +191,9 @@
``__setitem__`` for slicing isn't supported. Additionally, using negative
indices for slicing is still not support, even when using ``__getslice__``.
- Note that from May 2016 the destructor ``__del__`` must only contain
- `simple operations`__; for any kind of more complex destructor, see
- ``rpython.rlib.rgc.register_finalizer()``.
+ Note that the destructor ``__del__`` should only contain `simple
+ operations`__; for any kind of more complex destructor, consider
+ using instead ``rpython.rlib.rgc.FinalizerQueue``.
.. __: garbage_collection.html
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -147,15 +147,15 @@
the four malloc_[fixed,var]size[_clear]() functions.
"""
size = self.fixed_size(typeid)
- needs_destructor = (bool(self.destructor_or_custom_trace(typeid))
- and not self.has_custom_trace(typeid))
- finalizer_is_light = (needs_destructor and
+ needs_finalizer = (bool(self.destructor_or_custom_trace(typeid))
+ and not self.has_custom_trace(typeid))
+ finalizer_is_light = (needs_finalizer and
not self.is_old_style_finalizer(typeid))
contains_weakptr = self.weakpointer_offset(typeid) >= 0
- assert not (needs_destructor and contains_weakptr)
+ assert not (needs_finalizer and contains_weakptr)
if self.is_varsize(typeid):
assert not contains_weakptr
- assert not needs_destructor
+ assert not needs_finalizer
itemsize = self.varsize_item_sizes(typeid)
offset_to_length = self.varsize_offset_to_length(typeid)
if self.malloc_zero_filled:
@@ -170,7 +170,7 @@
malloc_fixedsize = self.malloc_fixedsize_clear
else:
malloc_fixedsize = self.malloc_fixedsize
- ref = malloc_fixedsize(typeid, size, needs_destructor,
+ ref = malloc_fixedsize(typeid, size, needs_finalizer,
finalizer_is_light,
contains_weakptr)
# lots of cast and reverse-cast around...
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -608,7 +608,7 @@
def malloc_fixedsize(self, typeid, size,
- needs_destructor=False,
+ needs_finalizer=False,
is_finalizer_light=False,
contains_weakptr=False):
size_gc_header = self.gcheaderbuilder.size_gc_header
@@ -617,7 +617,7 @@
#
# If the object needs a finalizer, ask for a rawmalloc.
# The following check should be constant-folded.
- if needs_destructor and not is_finalizer_light:
+ if needs_finalizer and not is_finalizer_light:
# old-style finalizers only!
from rpython.rtyper.lltypesystem import rffi
ll_assert(not contains_weakptr,
@@ -657,7 +657,7 @@
#
# If it is a weakref or has a lightweight destructor, record it
# (checks constant-folded).
- if needs_destructor:
+ if needs_finalizer:
self.young_objects_with_destructors.append(obj)
if contains_weakptr:
self.young_objects_with_weakrefs.append(obj)
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -307,6 +307,7 @@
[s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
annmodel.SomeBool(),
+ annmodel.SomeBool(),
annmodel.SomeBool()], s_gcref,
inline = False)
self.malloc_varsize_ptr = getfn(
@@ -321,6 +322,7 @@
[s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
annmodel.SomeBool(),
+ annmodel.SomeBool(),
annmodel.SomeBool()], s_gcref,
inline = False)
self.malloc_varsize_ptr = getfn(
@@ -383,7 +385,7 @@
malloc_fast,
[s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
- s_False, s_False], s_gcref,
+ s_False, s_False, s_False], s_gcref,
inline = True)
else:
self.malloc_fast_ptr = None
@@ -792,10 +794,11 @@
info = self.layoutbuilder.get_info(type_id)
c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
fptrs = self.special_funcptr_for_type(TYPE)
- has_destructor = "destructor" in fptrs
- assert "finalizer" not in fptrs # removed
- assert "light_finalizer" not in fptrs # removed
- c_has_destructor = rmodel.inputconst(lltype.Bool, has_destructor)
+ has_finalizer = "destructor" in fptrs or "old_style_finalizer" in fptrs
+ has_light_finalizer = "destructor" in fptrs
+ c_has_finalizer = rmodel.inputconst(lltype.Bool, has_finalizer)
+ c_has_light_finalizer = rmodel.inputconst(lltype.Bool,
+ has_light_finalizer)
if flags.get('nonmovable'):
assert op.opname == 'malloc'
@@ -805,16 +808,16 @@
elif not op.opname.endswith('_varsize') and not flags.get('varsize'):
zero = flags.get('zero', False)
if (self.malloc_fast_ptr is not None and
- not c_has_destructor.value and
+ not c_has_finalizer.value and
(self.malloc_fast_is_clearing or not zero)):
malloc_ptr = self.malloc_fast_ptr
else:
malloc_ptr = self.malloc_fixedsize_ptr
args = [self.c_const_gc, c_type_id, c_size,
- c_has_destructor,
+ c_has_finalizer, c_has_light_finalizer,
rmodel.inputconst(lltype.Bool, False)]
else:
- assert not c_has_destructor.value
+ assert not c_has_finalizer.value
info_varsize = self.layoutbuilder.get_info_varsize(type_id)
v_length = op.args[-1]
c_ofstolength = rmodel.inputconst(lltype.Signed,
@@ -950,12 +953,13 @@
def gct_do_malloc_fixedsize(self, hop):
# used by the JIT (see rpython.jit.backend.llsupport.gc)
op = hop.spaceop
- [v_typeid, v_size, v_has_destructor, v_contains_weakptr] = op.args
+ [v_typeid, v_size,
+ v_has_finalizer, v_has_light_finalizer, v_contains_weakptr] = op.args
livevars = self.push_roots(hop)
hop.genop("direct_call",
[self.malloc_fixedsize_ptr, self.c_const_gc,
v_typeid, v_size,
- v_has_destructor,
+ v_has_finalizer, v_has_light_finalizer,
v_contains_weakptr],
resultvar=op.result)
self.pop_roots(hop, livevars)
@@ -1063,7 +1067,7 @@
c_false = rmodel.inputconst(lltype.Bool, False)
c_has_weakptr = rmodel.inputconst(lltype.Bool, True)
args = [self.c_const_gc, c_type_id, c_size,
- c_false, c_has_weakptr]
+ c_false, c_false, c_has_weakptr]
# push and pop the current live variables *including* the argument
# to the weakref_create operation, which must be kept alive and
@@ -1595,7 +1599,7 @@
def make_destructor_funcptr_for_type(self, TYPE):
if not self.has_destructor(TYPE):
- return None
+ return None, False
rtti = get_rtti(TYPE)
destrptr = rtti._obj.destructor_funcptr
DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py
--- a/rpython/memory/test/test_transformed_gc.py
+++ b/rpython/memory/test/test_transformed_gc.py
@@ -323,6 +323,35 @@
res = run([5, 42]) #XXX pure lazyness here too
assert res == 6
+ def define_old_style_finalizer(cls):
+ class B(object):
+ pass
+ b = B()
+ b.nextid = 0
+ b.num_deleted = 0
+ class A(object):
+ def __init__(self):
+ self.id = b.nextid
+ b.nextid += 1
+ def __del__(self):
+ llop.gc__collect(lltype.Void)
+ b.num_deleted += 1
+ def f(x, y):
+ a = A()
+ i = 0
+ while i < x:
+ i += 1
+ a = A()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ return b.num_deleted
+ return f
+
+ def test_old_style_finalizer(self):
+ run = self.runner("old_style_finalizer")
+ res = run([5, 42]) #XXX pure lazyness here too
+ assert res == 6
+
def define_finalizer(cls):
class B(object):
pass
@@ -846,7 +875,8 @@
if op.opname == 'do_malloc_fixedsize':
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
- Constant(False, lltype.Bool), # has_destructor
+ Constant(False, lltype.Bool), # has_finalizer
+ Constant(False, lltype.Bool), # has_finalizer_light
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
@@ -882,7 +912,8 @@
if op.opname == 'do_malloc_fixedsize':
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
- Constant(False, lltype.Bool), # has_destructor
+ Constant(False, lltype.Bool), # has_finalizer
+ Constant(False, lltype.Bool), # has_finalizer_light
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
From pypy.commits at gmail.com Wed May 4 11:27:05 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 08:27:05 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Reduce the diff
Message-ID: <572a14c9.47afc20a.a55a6.ffff81dc@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84189:de981b52f14b
Date: 2016-05-04 17:27 +0200
http://bitbucket.org/pypy/pypy/changeset/de981b52f14b/
Log: Reduce the diff
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -1,7 +1,6 @@
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.debug import ll_assert
-from rpython.rlib.objectmodel import we_are_translated
from rpython.memory.gcheader import GCHeaderBuilder
from rpython.memory.support import DEFAULT_CHUNK_SIZE
from rpython.memory.support import get_address_stack, get_address_deque
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -392,7 +392,7 @@
def make_destructor_funcptr_for_type(self, TYPE):
# must be overridden for proper destructor support
- return None
+ return None, False
def make_custom_trace_funcptr_for_type(self, TYPE):
# must be overridden for proper custom tracer support
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -4,7 +4,6 @@
from rpython.rtyper.annlowlevel import llhelper, cast_nongc_instance_to_adr
from rpython.memory import gctypelayout
from rpython.flowspace.model import Constant
-from rpython.rlib import rgc
class GCManagedHeap(object):
diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py
--- a/rpython/memory/test/test_transformed_gc.py
+++ b/rpython/memory/test/test_transformed_gc.py
@@ -50,8 +50,6 @@
taggedpointers = False
def setup_class(cls):
- if cls is not TestIncrementalMiniMarkGC:
- py.test.skip("FOO")
cls.marker = lltype.malloc(rffi.CArray(lltype.Signed), 1,
flavor='raw', zero=True)
funcs0 = []
@@ -876,7 +874,7 @@
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
Constant(False, lltype.Bool), # has_finalizer
- Constant(False, lltype.Bool), # has_finalizer_light
+ Constant(False, lltype.Bool), # is_finalizer_light
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
@@ -913,7 +911,7 @@
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
Constant(False, lltype.Bool), # has_finalizer
- Constant(False, lltype.Bool), # has_finalizer_light
+ Constant(False, lltype.Bool), # is_finalizer_light
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
From pypy.commits at gmail.com Wed May 4 12:11:13 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 09:11:13 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: oops
Message-ID: <572a1f21.4412c30a.5e9c9.ffff97df@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84190:af37b7a7dc95
Date: 2016-05-04 17:50 +0200
http://bitbucket.org/pypy/pypy/changeset/af37b7a7dc95/
Log: oops
diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py
--- a/rpython/memory/test/gc_test_base.py
+++ b/rpython/memory/test/gc_test_base.py
@@ -497,7 +497,7 @@
else:
a.count = 666 # not ok
else:
- if b.ref() is self:
+ if b.ref() is b:
a.count += 10 # ok
else:
a.count = 666 # not ok
From pypy.commits at gmail.com Wed May 4 12:11:15 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 09:11:15 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: update semispace
Message-ID: <572a1f23.4ca51c0a.bb213.ffffdaa3@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84191:6343ed75104e
Date: 2016-05-04 18:11 +0200
http://bitbucket.org/pypy/pypy/changeset/6343ed75104e/
Log: update semispace
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -347,6 +347,32 @@
i += 1
enum_pending_finalizers._annspecialcase_ = 'specialize:arg(1)'
+ def _copy_pending_finalizers_deque(self, deque, copy_fn):
+ tmp = self.AddressDeque()
+ while deque.non_empty():
+ obj = deque.popleft()
+ tmp.append(copy_fn(obj))
+ while tmp.non_empty():
+ deque.append(tmp.popleft())
+ tmp.delete()
+
+ def copy_pending_finalizers(self, copy_fn):
+ "NOTE: not very efficient, but only for SemiSpaceGC and subclasses"
+ self._copy_pending_finalizers_deque(
+ self.run_old_style_finalizers, copy_fn)
+ handlers = self.finalizer_handlers()
+ i = 0
+ while i < len(handlers):
+ h = handlers[i]
+ self._copy_pending_finalizers_deque(
+ self._adr2deque(h.deque), copy_fn)
+ i += 1
+
+ def call_destructor(self, obj):
+ destructor = self.destructor_or_custom_trace(self.get_type_id(obj))
+ ll_assert(bool(destructor), "no destructor found")
+ destructor(obj)
+
def debug_check_consistency(self):
"""To use after a collection. If self.DEBUG is set, this
enumerates all roots and traces all objects to check if we didn't
@@ -402,8 +428,6 @@
while self.run_old_style_finalizers.non_empty():
obj = self.run_old_style_finalizers.popleft()
typeid = self.get_type_id(obj)
- ll_assert(self.is_old_style_finalizer(typeid),
- "bogus old-style finalizer")
finalizer = self.destructor_or_custom_trace(typeid)
finalizer(obj)
finally:
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2602,11 +2602,6 @@
# ----------
# Finalizers
- def call_destructor(self, obj):
- destructor = self.destructor_or_custom_trace(self.get_type_id(obj))
- ll_assert(bool(destructor), "no destructor found")
- destructor(obj)
-
def deal_with_young_objects_with_destructors(self):
"""We can reasonably assume that destructors don't do
anything fancy and *just* call them. Among other things
diff --git a/rpython/memory/gc/semispace.py b/rpython/memory/gc/semispace.py
--- a/rpython/memory/gc/semispace.py
+++ b/rpython/memory/gc/semispace.py
@@ -111,7 +111,9 @@
# self.objects_with_light_finalizers.append(result + size_gc_header)
#else:
if has_finalizer:
+ from rpython.rtyper.lltypesystem import rffi
self.objects_with_finalizers.append(result + size_gc_header)
+ self.objects_with_finalizers.append(rffi.cast(llmemory.Address, -1))
if contains_weakptr:
self.objects_with_weakrefs.append(result + size_gc_header)
return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
@@ -149,6 +151,13 @@
else:
return False
+ def register_finalizer(self, fq_index, gcobj):
+ from rpython.rtyper.lltypesystem import rffi
+ obj = llmemory.cast_ptr_to_adr(gcobj)
+ fq_index = rffi.cast(llmemory.Address, fq_index)
+ self.objects_with_finalizers.append(obj)
+ self.objects_with_finalizers.append(fq_index)
+
def obtain_free_space(self, needed):
# a bit of tweaking to maximize the performance and minimize the
# amount of code in an inlined version of malloc_fixedsize_clear()
@@ -268,8 +277,7 @@
scan = self.free = tospace
self.starting_full_collect()
self.collect_roots()
- if self.run_finalizers.non_empty():
- self.update_run_finalizers()
+ self.copy_pending_finalizers(self.copy)
scan = self.scan_copied(scan)
if self.objects_with_light_finalizers.non_empty():
self.deal_with_objects_with_light_finalizers()
@@ -499,8 +507,7 @@
if self.surviving(obj):
new_objects.append(self.get_forwarding_address(obj))
else:
- finalizer = self.getfinalizer(self.get_type_id(obj))
- finalizer(obj)
+ self.call_destructor(obj)
self.objects_with_light_finalizers.delete()
self.objects_with_light_finalizers = new_objects
@@ -517,12 +524,15 @@
self.tmpstack = self.AddressStack()
while self.objects_with_finalizers.non_empty():
x = self.objects_with_finalizers.popleft()
+ fq_nr = self.objects_with_finalizers.popleft()
ll_assert(self._finalization_state(x) != 1,
"bad finalization state 1")
if self.surviving(x):
new_with_finalizer.append(self.get_forwarding_address(x))
+ new_with_finalizer.append(fq_nr)
continue
marked.append(x)
+ marked.append(fq_nr)
pending.append(x)
while pending.non_empty():
y = pending.pop()
@@ -537,17 +547,21 @@
while marked.non_empty():
x = marked.popleft()
+ fq_nr = marked.popleft()
state = self._finalization_state(x)
ll_assert(state >= 2, "unexpected finalization state < 2")
newx = self.get_forwarding_address(x)
if state == 2:
- self.run_finalizers.append(newx)
+ from rpython.rtyper.lltypesystem import rffi
+ fq_index = rffi.cast(lltype.Signed, fq_nr)
+ self.mark_finalizer_to_run(fq_index, newx)
# we must also fix the state from 2 to 3 here, otherwise
# we leave the GCFLAG_FINALIZATION_ORDERING bit behind
# which will confuse the next collection
self._recursively_bump_finalization_state_from_2_to_3(x)
else:
new_with_finalizer.append(newx)
+ new_with_finalizer.append(fq_nr)
self.tmpstack.delete()
pending.delete()
@@ -627,16 +641,6 @@
self.objects_with_weakrefs.delete()
self.objects_with_weakrefs = new_with_weakref
- def update_run_finalizers(self):
- # we are in an inner collection, caused by a finalizer
- # the run_finalizers objects need to be copied
- new_run_finalizer = self.AddressDeque()
- while self.run_finalizers.non_empty():
- obj = self.run_finalizers.popleft()
- new_run_finalizer.append(self.copy(obj))
- self.run_finalizers.delete()
- self.run_finalizers = new_run_finalizer
-
def _is_external(self, obj):
return (self.header(obj).tid & GCFLAG_EXTERNAL) != 0
From pypy.commits at gmail.com Wed May 4 12:26:40 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 09:26:40 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Fixes
Message-ID: <572a22c0.109a1c0a.a881a.ffff9e82@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84192:db5aabaa5d41
Date: 2016-05-04 18:26 +0200
http://bitbucket.org/pypy/pypy/changeset/db5aabaa5d41/
Log: Fixes
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -427,9 +427,7 @@
i += 1
while self.run_old_style_finalizers.non_empty():
obj = self.run_old_style_finalizers.popleft()
- typeid = self.get_type_id(obj)
- finalizer = self.destructor_or_custom_trace(typeid)
- finalizer(obj)
+ self.call_destructor(obj)
finally:
self.finalizer_lock = False
diff --git a/rpython/memory/gc/generation.py b/rpython/memory/gc/generation.py
--- a/rpython/memory/gc/generation.py
+++ b/rpython/memory/gc/generation.py
@@ -355,6 +355,7 @@
scan = beginning = self.free
self.collect_oldrefs_to_nursery()
self.collect_roots_in_nursery()
+ self.collect_young_objects_with_finalizers()
scan = self.scan_objects_just_copied_out_of_nursery(scan)
# at this point, all static and old objects have got their
# GCFLAG_NO_YOUNG_PTRS set again by trace_and_drag_out_of_nursery
@@ -422,6 +423,19 @@
if self.is_in_nursery(obj):
root.address[0] = self.copy(obj)
+ def collect_young_objects_with_finalizers(self):
+ # XXX always walk the whole 'objects_with_finalizers' list here
+ new = self.AddressDeque()
+ while self.objects_with_finalizers.non_empty():
+ obj = self.objects_with_finalizers.popleft()
+ fq_nr = self.objects_with_finalizers.popleft()
+ if self.is_in_nursery(obj):
+ obj = self.copy(obj)
+ new.append(obj)
+ new.append(fq_nr)
+ self.objects_with_finalizers.delete()
+ self.objects_with_finalizers = new
+
def scan_objects_just_copied_out_of_nursery(self, scan):
while scan < self.free:
curr = scan + self.size_gc_header()
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -619,14 +619,12 @@
# The following check should be constant-folded.
if needs_finalizer and not is_finalizer_light:
# old-style finalizers only!
- from rpython.rtyper.lltypesystem import rffi
ll_assert(not contains_weakptr,
"'needs_finalizer' and 'contains_weakptr' both specified")
obj = self.external_malloc(typeid, 0, alloc_young=False)
- self.old_objects_with_finalizers.append(obj)
- self.old_objects_with_finalizers.append(
- rffi.cast(llmemory.Address, -1))
- return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
+ res = llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
+ self.register_finalizer(-1, res)
+ return res
#
# If totalsize is greater than nonlarge_max (which should never be
# the case in practice), ask for a rawmalloc. The following check
From pypy.commits at gmail.com Wed May 4 12:32:35 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 09:32:35 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Translation fix
Message-ID: <572a2423.22c8c20a.a96ca.ffff9f2c@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84193:724566fe8685
Date: 2016-05-04 18:32 +0200
http://bitbucket.org/pypy/pypy/changeset/724566fe8685/
Log: Translation fix
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -1520,7 +1520,7 @@
def get_finalizer_queue_index(self, hop):
fq_tag = hop.spaceop.args[0].value
- assert fq_tag.expr == 'FinalizerQueue TAG'
+ assert 'FinalizerQueue TAG' in fq_tag.expr
fq = fq_tag.default
try:
index = self.finalizer_queue_indexes[fq]
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -216,7 +216,7 @@
self.gcdata.finalizer_handlers = llmemory.cast_ptr_to_adr(ll_handlers)
def get_finalizer_queue_index(self, fq_tag):
- assert fq_tag.expr == 'FinalizerQueue TAG'
+ assert 'FinalizerQueue TAG' in fq_tag.expr
fq = fq_tag.default
try:
index = self.finalizer_queue_indexes[fq]
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -495,7 +495,9 @@
self.bookkeeper.emulate_pbc_call(self.bookkeeper.position_key,
s_func, [])
if not hasattr(fq, '_fq_tag'):
- fq._fq_tag = CDefinedIntSymbolic('FinalizerQueue TAG', default=fq)
+ fq._fq_tag = CDefinedIntSymbolic(
+ '0 /*FinalizerQueue TAG for %s*/' % fq.__class__.__name__,
+ default=fq)
return self.bookkeeper.immutablevalue(fq._fq_tag)
def specialize_call(self, hop):
From pypy.commits at gmail.com Wed May 4 12:44:45 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 09:44:45 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Copy the changes from incminimark.py
to minimark.py
Message-ID: <572a26fd.22d8c20a.1e4f.ffffa32e@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84194:d05fc0b0e9c8
Date: 2016-05-04 18:44 +0200
http://bitbucket.org/pypy/pypy/changeset/d05fc0b0e9c8/
Log: Copy the changes from incminimark.py to minimark.py
diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py
--- a/rpython/memory/gc/minimark.py
+++ b/rpython/memory/gc/minimark.py
@@ -153,6 +153,8 @@
# ^^^ prebuilt objects may have the flag GCFLAG_HAS_SHADOW;
# then they are one word longer, the extra word storing the hash.
+ _ADDRARRAY = lltype.Array(llmemory.Address, hints={'nolength': True})
+
# During a minor collection, the objects in the nursery that are
# moved outside are changed in-place: their header is replaced with
@@ -309,10 +311,19 @@
self.old_rawmalloced_objects = self.AddressStack()
self.rawmalloced_total_size = r_uint(0)
#
- # A list of all objects with finalizers (these are never young).
- self.objects_with_finalizers = self.AddressDeque()
- self.young_objects_with_light_finalizers = self.AddressStack()
- self.old_objects_with_light_finalizers = self.AddressStack()
+ # Two lists of all objects with finalizers. Actually they are lists
+ # of pairs (finalization_queue_nr, object). "probably young objects"
+ # are all traced and moved to the "old" list by the next minor
+ # collection.
+ self.probably_young_objects_with_finalizers = self.AddressDeque()
+ self.old_objects_with_finalizers = self.AddressDeque()
+ p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw',
+ track_allocation=False)
+ self.singleaddr = llmemory.cast_ptr_to_adr(p)
+ #
+ # Two lists of all objects with destructors.
+ self.young_objects_with_destructors = self.AddressStack()
+ self.old_objects_with_destructors = self.AddressStack()
#
# Two lists of the objects with weakrefs. No weakref can be an
# old object weakly pointing to a young object: indeed, weakrefs
@@ -517,15 +528,18 @@
# If the object needs a finalizer, ask for a rawmalloc.
# The following check should be constant-folded.
if needs_finalizer and not is_finalizer_light:
+ # old-style finalizers only!
ll_assert(not contains_weakptr,
"'needs_finalizer' and 'contains_weakptr' both specified")
obj = self.external_malloc(typeid, 0, alloc_young=False)
- self.objects_with_finalizers.append(obj)
+ res = llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
+ self.register_finalizer(-1, res)
+ return res
#
# If totalsize is greater than nonlarge_max (which should never be
# the case in practice), ask for a rawmalloc. The following check
# should be constant-folded.
- elif rawtotalsize > self.nonlarge_max:
+ if rawtotalsize > self.nonlarge_max:
ll_assert(not contains_weakptr,
"'contains_weakptr' specified for a large object")
obj = self.external_malloc(typeid, 0, alloc_young=True)
@@ -550,11 +564,13 @@
if is_finalizer_light:
self.young_objects_with_light_finalizers.append(obj)
self.init_gc_object(result, typeid, flags=0)
- #
- # If it is a weakref, record it (check constant-folded).
- if contains_weakptr:
- self.young_objects_with_weakrefs.append(obj)
#
+ # If it is a weakref or has a lightweight destructor, record it
+ # (checks constant-folded).
+ if needs_finalizer:
+ self.young_objects_with_destructors.append(obj)
+ if contains_weakptr:
+ self.young_objects_with_weakrefs.append(obj)
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
@@ -676,6 +692,7 @@
collect_and_reserve._dont_inline_ = True
+ # XXX kill alloc_young and make it always True
def external_malloc(self, typeid, length, alloc_young):
"""Allocate a large object using the ArenaCollection or
raw_malloc(), possibly as an object with card marking enabled,
@@ -1241,6 +1258,13 @@
self.old_objects_with_cards_set.append(dest_addr)
dest_hdr.tid |= GCFLAG_CARDS_SET
+ def register_finalizer(self, fq_index, gcobj):
+ from rpython.rtyper.lltypesystem import rffi
+ obj = llmemory.cast_ptr_to_adr(gcobj)
+ fq_index = rffi.cast(llmemory.Address, fq_index)
+ self.probably_young_objects_with_finalizers.append(obj)
+ self.probably_young_objects_with_finalizers.append(fq_index)
+
# ----------
# Nursery collection
@@ -1264,6 +1288,11 @@
# 'old_objects_pointing_to_young'.
self.collect_roots_in_nursery()
#
+ # visit the "probably young" objects with finalizers. They
+ # always all survive.
+ if self.probably_young_objects_with_finalizers.non_empty():
+ self.deal_with_young_objects_with_finalizers()
+ #
while True:
# If we are using card marking, do a partial trace of the arrays
# that are flagged with GCFLAG_CARDS_SET.
@@ -1288,8 +1317,8 @@
# weakrefs' targets.
if self.young_objects_with_weakrefs.non_empty():
self.invalidate_young_weakrefs()
- if self.young_objects_with_light_finalizers.non_empty():
- self.deal_with_young_objects_with_finalizers()
+ if self.young_objects_with_destructors.non_empty():
+ self.deal_with_young_objects_with_destructors()
#
# Clear this mapping.
if self.nursery_objects_shadows.length() > 0:
@@ -1613,7 +1642,7 @@
# with a finalizer and all objects reachable from there (and also
# moves some objects from 'objects_with_finalizers' to
# 'run_finalizers').
- if self.objects_with_finalizers.non_empty():
+ if self.old_objects_with_finalizers.non_empty():
self.deal_with_objects_with_finalizers()
#
self.objects_to_trace.delete()
@@ -1621,8 +1650,8 @@
# Weakref support: clear the weak pointers to dying objects
if self.old_objects_with_weakrefs.non_empty():
self.invalidate_old_weakrefs()
- if self.old_objects_with_light_finalizers.non_empty():
- self.deal_with_old_objects_with_finalizers()
+ if self.old_objects_with_destructors.non_empty():
+ self.deal_with_old_objects_with_destructors()
#
# Walk all rawmalloced objects and free the ones that don't
@@ -1745,8 +1774,8 @@
#
# If we are in an inner collection caused by a call to a finalizer,
# the 'run_finalizers' objects also need to be kept alive.
- self.run_finalizers.foreach(self._collect_obj,
- self.objects_to_trace)
+ self.enum_pending_finalizers(self._collect_obj,
+ self.objects_to_trace)
def enumerate_all_roots(self, callback, arg):
self.prebuilt_root_objects.foreach(callback, arg)
@@ -1878,41 +1907,45 @@
# ----------
# Finalizers
- def deal_with_young_objects_with_finalizers(self):
- """ This is a much simpler version of dealing with finalizers
- and an optimization - we can reasonably assume that those finalizers
- don't do anything fancy and *just* call them. Among other things
+ def deal_with_young_objects_with_destructors(self):
+ """We can reasonably assume that destructors don't do
+ anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
- while self.young_objects_with_light_finalizers.non_empty():
- obj = self.young_objects_with_light_finalizers.pop()
+ while self.young_objects_with_destructors.non_empty():
+ obj = self.young_objects_with_destructors.pop()
if not self.is_forwarded(obj):
- finalizer = self.getlightfinalizer(self.get_type_id(obj))
- ll_assert(bool(finalizer), "no light finalizer found")
- finalizer(obj)
+ self.call_destructor(obj)
else:
obj = self.get_forwarding_address(obj)
- self.old_objects_with_light_finalizers.append(obj)
+ self.old_objects_with_destructors.append(obj)
- def deal_with_old_objects_with_finalizers(self):
- """ This is a much simpler version of dealing with finalizers
- and an optimization - we can reasonably assume that those finalizers
- don't do anything fancy and *just* call them. Among other things
+ def deal_with_old_objects_with_destructors(self):
+ """We can reasonably assume that destructors don't do
+ anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
new_objects = self.AddressStack()
- while self.old_objects_with_light_finalizers.non_empty():
- obj = self.old_objects_with_light_finalizers.pop()
+ while self.old_objects_with_destructors.non_empty():
+ obj = self.old_objects_with_destructors.pop()
if self.header(obj).tid & GCFLAG_VISITED:
# surviving
new_objects.append(obj)
else:
# dying
- finalizer = self.getlightfinalizer(self.get_type_id(obj))
- ll_assert(bool(finalizer), "no light finalizer found")
- finalizer(obj)
- self.old_objects_with_light_finalizers.delete()
- self.old_objects_with_light_finalizers = new_objects
+ self.call_destructor(obj)
+ self.old_objects_with_destructors.delete()
+ self.old_objects_with_destructors = new_objects
+
+ def deal_with_young_objects_with_finalizers(self):
+ while self.probably_young_objects_with_finalizers.non_empty():
+ obj = self.probably_young_objects_with_finalizers.popleft()
+ fq_nr = self.probably_young_objects_with_finalizers.popleft()
+ self.singleaddr.address[0] = obj
+ self._trace_drag_out1(self.singleaddr)
+ obj = self.singleaddr.address[0]
+ self.old_objects_with_finalizers.append(obj)
+ self.old_objects_with_finalizers.append(fq_nr)
def deal_with_objects_with_finalizers(self):
# Walk over list of objects with finalizers.
@@ -1925,14 +1958,17 @@
marked = self.AddressDeque()
pending = self.AddressStack()
self.tmpstack = self.AddressStack()
- while self.objects_with_finalizers.non_empty():
- x = self.objects_with_finalizers.popleft()
+ while self.old_objects_with_finalizers.non_empty():
+ x = self.old_objects_with_finalizers.popleft()
+ fq_nr = self.old_objects_with_finalizers.popleft()
ll_assert(self._finalization_state(x) != 1,
"bad finalization state 1")
if self.header(x).tid & GCFLAG_VISITED:
new_with_finalizer.append(x)
+ new_with_finalizer.append(fq_nr)
continue
marked.append(x)
+ marked.append(fq_nr)
pending.append(x)
while pending.non_empty():
y = pending.pop()
@@ -1946,22 +1982,26 @@
while marked.non_empty():
x = marked.popleft()
+ fq_nr = marked.popleft()
state = self._finalization_state(x)
ll_assert(state >= 2, "unexpected finalization state < 2")
if state == 2:
- self.run_finalizers.append(x)
+ from rpython.rtyper.lltypesystem import rffi
+ fq_index = rffi.cast(lltype.Signed, fq_nr)
+ self.mark_finalizer_to_run(fq_index, x)
# we must also fix the state from 2 to 3 here, otherwise
# we leave the GCFLAG_FINALIZATION_ORDERING bit behind
# which will confuse the next collection
self._recursively_bump_finalization_state_from_2_to_3(x)
else:
new_with_finalizer.append(x)
+ new_with_finalizer.append(fq_nr)
self.tmpstack.delete()
pending.delete()
marked.delete()
- self.objects_with_finalizers.delete()
- self.objects_with_finalizers = new_with_finalizer
+ self.old_objects_with_finalizers.delete()
+ self.old_objects_with_finalizers = new_with_finalizer
def _append_if_nonnull(pointer, stack):
stack.append(pointer.address[0])
From pypy.commits at gmail.com Wed May 4 12:50:05 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 09:50:05 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: fix
Message-ID: <572a283d.4d571c0a.fe2fe.ffffb258@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84195:a495ce740059
Date: 2016-05-04 18:50 +0200
http://bitbucket.org/pypy/pypy/changeset/a495ce740059/
Log: fix
diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py
--- a/rpython/memory/gc/minimark.py
+++ b/rpython/memory/gc/minimark.py
@@ -561,8 +561,6 @@
# Build the object.
llarena.arena_reserve(result, totalsize)
obj = result + size_gc_header
- if is_finalizer_light:
- self.young_objects_with_light_finalizers.append(obj)
self.init_gc_object(result, typeid, flags=0)
#
# If it is a weakref or has a lightweight destructor, record it
From pypy.commits at gmail.com Wed May 4 14:23:33 2016
From: pypy.commits at gmail.com (raff...@gmail.com)
Date: Wed, 04 May 2016 11:23:33 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-raffael_t: Remove unnecessary testfile
for matmul
Message-ID: <572a3e25.2179c20a.80082.ffffc8c5@mx.google.com>
Author: raffael.tfirst at gmail.com
Branch: py3.5-raffael_t
Changeset: r84196:17ac3f14e8ff
Date: 2016-05-04 20:22 +0200
http://bitbucket.org/pypy/pypy/changeset/17ac3f14e8ff/
Log: Remove unnecessary testfile for matmul
diff --git a/pypy/interpreter/test/test_35_mmult.py b/pypy/interpreter/test/test_35_mmult.py
deleted file mode 100644
--- a/pypy/interpreter/test/test_35_mmult.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import numpy
-
-x = numpy.ones(3)
-m = numpy.eye(3)
-
-a = x @ m
-
-print(a)
\ No newline at end of file
From pypy.commits at gmail.com Wed May 4 15:12:31 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Wed, 04 May 2016 12:12:31 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Merge py3.5-raffael_t into py3.5
Message-ID: <572a499f.22acc20a.bac01.ffffda70@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r84197:3007d740c2c9
Date: 2016-05-04 21:11 +0200
http://bitbucket.org/pypy/pypy/changeset/3007d740c2c9/
Log: Merge py3.5-raffael_t into py3.5
diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py
--- a/lib-python/3/opcode.py
+++ b/lib-python/3/opcode.py
@@ -85,10 +85,7 @@
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
-def_op('GET_AITER', 50)
-def_op('GET_ANEXT', 51)
-def_op('BEFORE_ASYNC_WITH', 52)
-
+def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
@@ -103,12 +100,11 @@
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
-def_op('GET_YIELD_FROM_ITER', 69)
+def_op('STORE_LOCALS', 69)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
-def_op('GET_AWAITABLE', 73)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
@@ -116,8 +112,7 @@
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
-def_op('WITH_CLEANUP_START', 81)
-def_op('WITH_CLEANUP_FINISH', 82)
+def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
@@ -200,20 +195,9 @@
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
-def_op('LOAD_CLASSDEREF', 148)
-hasfree.append(148)
-
-jrel_op('SETUP_ASYNC_WITH', 154)
-
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
-def_op('BUILD_LIST_UNPACK', 149)
-def_op('BUILD_MAP_UNPACK', 150)
-def_op('BUILD_MAP_UNPACK_WITH_CALL', 151)
-def_op('BUILD_TUPLE_UNPACK', 152)
-def_op('BUILD_SET_UNPACK', 153)
-
# pypy modification, experimental bytecode
def_op('LOOKUP_METHOD', 201) # Index in name list
hasname.append(201)
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -557,6 +557,7 @@
ops.LIST_APPEND: -1,
ops.SET_ADD: -1,
ops.MAP_ADD: -2,
+ # XXX
ops.BINARY_POWER: -1,
ops.BINARY_MULTIPLY: -1,
@@ -566,6 +567,7 @@
ops.BINARY_SUBSCR: -1,
ops.BINARY_FLOOR_DIVIDE: -1,
ops.BINARY_TRUE_DIVIDE: -1,
+ ops.BINARY_MATRIX_MULTIPLY: -1,
ops.BINARY_LSHIFT: -1,
ops.BINARY_RSHIFT: -1,
ops.BINARY_AND: -1,
@@ -579,6 +581,7 @@
ops.INPLACE_MULTIPLY: -1,
ops.INPLACE_MODULO: -1,
ops.INPLACE_POWER: -1,
+ ops.INPLACE_MATRIX_MULTIPLY: -1,
ops.INPLACE_LSHIFT: -1,
ops.INPLACE_RSHIFT: -1,
ops.INPLACE_AND: -1,
@@ -613,6 +616,7 @@
ops.YIELD_FROM: -1,
ops.COMPARE_OP: -1,
+ # TODO
ops.LOOKUP_METHOD: 1,
ops.LOAD_NAME: 1,
@@ -649,8 +653,10 @@
ops.JUMP_IF_FALSE_OR_POP: 0,
ops.POP_JUMP_IF_TRUE: -1,
ops.POP_JUMP_IF_FALSE: -1,
+ # TODO
ops.JUMP_IF_NOT_DEBUG: 0,
+ # TODO
ops.BUILD_LIST_FROM_ARG: 1,
}
diff --git a/pypy/interpreter/astcompiler/assemble.py.orig b/pypy/interpreter/astcompiler/assemble.py.orig
new file mode 100644
--- /dev/null
+++ b/pypy/interpreter/astcompiler/assemble.py.orig
@@ -0,0 +1,765 @@
+"""Python control flow graph generation and bytecode assembly."""
+
+import os
+from rpython.rlib import rfloat
+from rpython.rlib.objectmodel import specialize, we_are_translated
+
+from pypy.interpreter.astcompiler import ast, consts, misc, symtable
+from pypy.interpreter.error import OperationError
+from pypy.interpreter.pycode import PyCode
+from pypy.tool import stdlib_opcode as ops
+
+
+class StackDepthComputationError(Exception):
+ pass
+
+
+class Instruction(object):
+ """Represents a single opcode."""
+
+ def __init__(self, opcode, arg=0):
+ self.opcode = opcode
+ self.arg = arg
+ self.lineno = 0
+ self.has_jump = False
+
+ def size(self):
+ """Return the size of bytes of this instruction when it is
+ encoded.
+ """
+ if self.opcode >= ops.HAVE_ARGUMENT:
+ return (6 if self.arg > 0xFFFF else 3)
+ return 1
+
+ def jump_to(self, target, absolute=False):
+ """Indicate the target this jump instruction.
+
+ The opcode must be a JUMP opcode.
+ """
+ self.jump = (target, absolute)
+ self.has_jump = True
+
+ def __repr__(self):
+ data = [ops.opname[self.opcode]]
+ template = "<%s"
+ if self.opcode >= ops.HAVE_ARGUMENT:
+ data.append(self.arg)
+ template += " %i"
+ if self.has_jump:
+ data.append(self.jump[0])
+ template += " %s"
+ template += ">"
+ return template % tuple(data)
+
+
+class Block(object):
+ """A basic control flow block.
+
+ It has one entry point and several possible exit points. Its
+ instructions may be jumps to other blocks, or if control flow
+ reaches the end of the block, it continues to next_block.
+ """
+
+ marked = False
+ have_return = False
+ auto_inserted_return = False
+
+ def __init__(self):
+ self.instructions = []
+ self.next_block = None
+
+ def _post_order_see(self, stack, nextblock):
+ if nextblock.marked == 0:
+ nextblock.marked = 1
+ stack.append(nextblock)
+
+ def post_order(self):
+ """Return this block and its children in post order. This means
+ that the graph of blocks is first cleaned up to ignore
+ back-edges, thus turning it into a DAG. Then the DAG is
+ linearized. For example:
+
+ A --> B -\ => [A, D, B, C]
+ \-> D ---> C
+ """
+ resultblocks = []
+ stack = [self]
+ self.marked = 1
+ while stack:
+ current = stack[-1]
+ if current.marked == 1:
+ current.marked = 2
+ if current.next_block is not None:
+ self._post_order_see(stack, current.next_block)
+ else:
+ i = current.marked - 2
+ assert i >= 0
+ while i < len(current.instructions):
+ instr = current.instructions[i]
+ i += 1
+ if instr.has_jump:
+ current.marked = i + 2
+ self._post_order_see(stack, instr.jump[0])
+ break
+ else:
+ resultblocks.append(current)
+ stack.pop()
+ resultblocks.reverse()
+ return resultblocks
+
+ def code_size(self):
+ """Return the encoded size of all the instructions in this
+ block.
+ """
+ i = 0
+ for instr in self.instructions:
+ i += instr.size()
+ return i
+
+ def get_code(self):
+ """Encode the instructions in this block into bytecode."""
+ code = []
+ for instr in self.instructions:
+ opcode = instr.opcode
+ if opcode >= ops.HAVE_ARGUMENT:
+ arg = instr.arg
+ if instr.arg > 0xFFFF:
+ ext = arg >> 16
+ code.append(chr(ops.EXTENDED_ARG))
+ code.append(chr(ext & 0xFF))
+ code.append(chr(ext >> 8))
+ arg &= 0xFFFF
+ code.append(chr(opcode))
+ code.append(chr(arg & 0xFF))
+ code.append(chr(arg >> 8))
+ else:
+ code.append(chr(opcode))
+ return ''.join(code)
+
+
+def _make_index_dict_filter(syms, flag):
+ i = 0
+ result = {}
+ for name, scope in syms.iteritems():
+ if scope == flag:
+ result[name] = i
+ i += 1
+ return result
+
+
+ at specialize.argtype(0)
+def _iter_to_dict(iterable, offset=0):
+ result = {}
+ index = offset
+ for item in iterable:
+ result[item] = index
+ index += 1
+ return result
+
+
+class PythonCodeMaker(ast.ASTVisitor):
+ """Knows how to assemble a PyCode object."""
+
+ def __init__(self, space, name, first_lineno, scope, compile_info):
+ self.space = space
+ self.name = name
+ self.first_lineno = first_lineno
+ self.compile_info = compile_info
+ self.first_block = self.new_block()
+ self.use_block(self.first_block)
+ self.names = {}
+ self.var_names = _iter_to_dict(scope.varnames)
+ self.cell_vars = _make_index_dict_filter(scope.symbols,
+ symtable.SCOPE_CELL)
+ self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars))
+ self.w_consts = space.newdict()
+ self.argcount = 0
+ self.kwonlyargcount = 0
+ self.lineno_set = False
+ self.lineno = 0
+ self.add_none_to_final_return = True
+
+ def new_block(self):
+ return Block()
+
+ def use_block(self, block):
+ """Start emitting bytecode into block."""
+ self.current_block = block
+ self.instrs = block.instructions
+
+ def use_next_block(self, block=None):
+ """Set this block as the next_block for the last and use it."""
+ if block is None:
+ block = self.new_block()
+ self.current_block.next_block = block
+ self.use_block(block)
+ return block
+
+ def is_dead_code(self):
+ """Return False if any code can be meaningfully added to the
+ current block, or True if it would be dead code."""
+ # currently only True after a RETURN_VALUE.
+ return self.current_block.have_return
+
+ def emit_op(self, op):
+ """Emit an opcode without an argument."""
+ instr = Instruction(op)
+ if not self.lineno_set:
+ instr.lineno = self.lineno
+ self.lineno_set = True
+ if not self.is_dead_code():
+ self.instrs.append(instr)
+ if op == ops.RETURN_VALUE:
+ self.current_block.have_return = True
+ return instr
+
+ def emit_op_arg(self, op, arg):
+ """Emit an opcode with an integer argument."""
+ instr = Instruction(op, arg)
+ if not self.lineno_set:
+ instr.lineno = self.lineno
+ self.lineno_set = True
+ if not self.is_dead_code():
+ self.instrs.append(instr)
+
+ def emit_op_name(self, op, container, name):
+ """Emit an opcode referencing a name."""
+ self.emit_op_arg(op, self.add_name(container, name))
+
+ def emit_jump(self, op, block_to, absolute=False):
+ """Emit a jump opcode to another block."""
+ self.emit_op(op).jump_to(block_to, absolute)
+
+ def add_name(self, container, name):
+ """Get the index of a name in container."""
+ name = self.scope.mangle(name)
+ try:
+ index = container[name]
+ except KeyError:
+ index = len(container)
+ container[name] = index
+ return index
+
+ def add_const(self, obj):
+ """Add a W_Root to the constant array and return its location."""
+ space = self.space
+ # To avoid confusing equal but separate types, we hash store the type
+ # of the constant in the dictionary. Moreover, we have to keep the
+ # difference between -0.0 and 0.0 floats, and this recursively in
+ # tuples.
+ w_key = self._make_key(obj)
+
+ w_len = space.finditem(self.w_consts, w_key)
+ if w_len is None:
+ w_len = space.len(self.w_consts)
+ space.setitem(self.w_consts, w_key, w_len)
+ if space.int_w(w_len) == 0:
+ self.scope.doc_removable = False
+ return space.int_w(w_len)
+
+ def _make_key(self, obj):
+ # see the tests 'test_zeros_not_mixed*' in ../test/test_compiler.py
+ space = self.space
+ w_type = space.type(obj)
+ if space.is_w(w_type, space.w_float):
+ val = space.float_w(obj)
+ if val == 0.0 and rfloat.copysign(1., val) < 0:
+ w_key = space.newtuple([obj, space.w_float, space.w_None])
+ else:
+ w_key = space.newtuple([obj, space.w_float])
+ elif space.is_w(w_type, space.w_complex):
+ w_real = space.getattr(obj, space.wrap("real"))
+ w_imag = space.getattr(obj, space.wrap("imag"))
+ real = space.float_w(w_real)
+ imag = space.float_w(w_imag)
+ real_negzero = (real == 0.0 and
+ rfloat.copysign(1., real) < 0)
+ imag_negzero = (imag == 0.0 and
+ rfloat.copysign(1., imag) < 0)
+ if real_negzero and imag_negzero:
+ tup = [obj, space.w_complex, space.w_None, space.w_None,
+ space.w_None]
+ elif imag_negzero:
+ tup = [obj, space.w_complex, space.w_None, space.w_None]
+ elif real_negzero:
+ tup = [obj, space.w_complex, space.w_None]
+ else:
+ tup = [obj, space.w_complex]
+ w_key = space.newtuple(tup)
+ elif space.is_w(w_type, space.w_tuple):
+ result_w = [obj, w_type]
+ for w_item in space.fixedview(obj):
+ result_w.append(self._make_key(w_item))
+ w_key = space.newtuple(result_w[:])
+ elif isinstance(obj, PyCode):
+ w_key = space.newtuple([obj, w_type, space.id(obj)])
+ else:
+ w_key = space.newtuple([obj, w_type])
+ return w_key
+
+ def load_const(self, obj):
+ index = self.add_const(obj)
+ self.emit_op_arg(ops.LOAD_CONST, index)
+
+ def update_position(self, lineno, force=False):
+ """Possibly change the lineno for the next instructions."""
+ if force or lineno > self.lineno:
+ self.lineno = lineno
+ self.lineno_set = False
+
+ def _resolve_block_targets(self, blocks):
+ """Compute the arguments of jump instructions."""
+ last_extended_arg_count = 0
+ # The reason for this loop is extended jumps. EXTENDED_ARG
+ # extends the bytecode size, so it might invalidate the offsets
+ # we've already given. Thus we have to loop until the number of
+ # extended args is stable. Any extended jump at all is
+ # extremely rare, so performance is not too concerning.
+ while True:
+ extended_arg_count = 0
+ offset = 0
+ force_redo = False
+ # Calculate the code offset of each block.
+ for block in blocks:
+ block.offset = offset
+ offset += block.code_size()
+ for block in blocks:
+ offset = block.offset
+ for instr in block.instructions:
+ offset += instr.size()
+ if instr.has_jump:
+ target, absolute = instr.jump
+ op = instr.opcode
+ # Optimize an unconditional jump going to another
+ # unconditional jump.
+ if op == ops.JUMP_ABSOLUTE or op == ops.JUMP_FORWARD:
+ if target.instructions:
+ target_op = target.instructions[0].opcode
+ if target_op == ops.JUMP_ABSOLUTE:
+ target = target.instructions[0].jump[0]
+ instr.opcode = ops.JUMP_ABSOLUTE
+ absolute = True
+ elif target_op == ops.RETURN_VALUE:
+ # Replace JUMP_* to a RETURN into
+ # just a RETURN
+ instr.opcode = ops.RETURN_VALUE
+ instr.arg = 0
+ instr.has_jump = False
+ # The size of the code changed,
+ # we have to trigger another pass
+ force_redo = True
+ continue
+ if absolute:
+ jump_arg = target.offset
+ else:
+ jump_arg = target.offset - offset
+ instr.arg = jump_arg
+ if jump_arg > 0xFFFF:
+ extended_arg_count += 1
+ if (extended_arg_count == last_extended_arg_count and
+ not force_redo):
+ break
+ else:
+ last_extended_arg_count = extended_arg_count
+
+ def _build_consts_array(self):
+ """Turn the applevel constants dictionary into a list."""
+ w_consts = self.w_consts
+ space = self.space
+ consts_w = [space.w_None] * space.len_w(w_consts)
+ w_iter = space.iter(w_consts)
+ first = space.wrap(0)
+ while True:
+ try:
+ w_key = space.next(w_iter)
+ except OperationError as e:
+ if not e.match(space, space.w_StopIteration):
+ raise
+ break
+ w_index = space.getitem(w_consts, w_key)
+ w_constant = space.getitem(w_key, first)
+ w_constant = misc.intern_if_common_string(space, w_constant)
+ consts_w[space.int_w(w_index)] = w_constant
+ return consts_w
+
+ def _get_code_flags(self):
+ """Get an extra flags that should be attached to the code object."""
+ raise NotImplementedError
+
+ def _stacksize(self, blocks):
+ """Compute co_stacksize."""
+ for block in blocks:
+ block.initial_depth = 0
+ # Assumes that it is sufficient to walk the blocks in 'post-order'.
+ # This means we ignore all back-edges, but apart from that, we only
+ # look into a block when all the previous blocks have been done.
+ self._max_depth = 0
+ for block in blocks:
+ depth = self._do_stack_depth_walk(block)
+ if block.auto_inserted_return and depth != 0:
+ os.write(2, "StackDepthComputationError in %s at %s:%s\n" % (
+ self.compile_info.filename, self.name, self.first_lineno))
+ raise StackDepthComputationError # fatal error
+ return self._max_depth
+
+ def _next_stack_depth_walk(self, nextblock, depth):
+ if depth > nextblock.initial_depth:
+ nextblock.initial_depth = depth
+
+ def _do_stack_depth_walk(self, block):
+ depth = block.initial_depth
+ for instr in block.instructions:
+ depth += _opcode_stack_effect(instr.opcode, instr.arg)
+ if depth >= self._max_depth:
+ self._max_depth = depth
+ jump_op = instr.opcode
+ if instr.has_jump:
+ target_depth = depth
+ if jump_op == ops.FOR_ITER:
+ target_depth -= 2
+ elif (jump_op == ops.SETUP_FINALLY or
+ jump_op == ops.SETUP_EXCEPT or
+ jump_op == ops.SETUP_WITH):
+ if jump_op == ops.SETUP_FINALLY:
+ target_depth += 4
+ elif jump_op == ops.SETUP_EXCEPT:
+ target_depth += 4
+ elif jump_op == ops.SETUP_WITH:
+ target_depth += 3
+ if target_depth > self._max_depth:
+ self._max_depth = target_depth
+ elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or
+ jump_op == ops.JUMP_IF_FALSE_OR_POP):
+ depth -= 1
+ self._next_stack_depth_walk(instr.jump[0], target_depth)
+ if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD:
+ # Nothing more can occur.
+ break
+ elif jump_op == ops.RETURN_VALUE or jump_op == ops.RAISE_VARARGS:
+ # Nothing more can occur.
+ break
+ else:
+ if block.next_block:
+ self._next_stack_depth_walk(block.next_block, depth)
+ return depth
+
+ def _build_lnotab(self, blocks):
+ """Build the line number table for tracebacks and tracing."""
+ current_line = self.first_lineno
+ current_off = 0
+ table = []
+ push = table.append
+ for block in blocks:
+ offset = block.offset
+ for instr in block.instructions:
+ if instr.lineno:
+ # compute deltas
+ line = instr.lineno - current_line
+ if line < 0:
+ continue
+ addr = offset - current_off
+ # Python assumes that lineno always increases with
+ # increasing bytecode address (lnotab is unsigned
+ # char). Depending on when SET_LINENO instructions
+ # are emitted this is not always true. Consider the
+ # code:
+ # a = (1,
+ # b)
+ # In the bytecode stream, the assignment to "a"
+ # occurs after the loading of "b". This works with
+ # the C Python compiler because it only generates a
+ # SET_LINENO instruction for the assignment.
+ if line or addr:
+ while addr > 255:
+ push(chr(255))
+ push(chr(0))
+ addr -= 255
+ while line > 255:
+ push(chr(addr))
+ push(chr(255))
+ line -= 255
+ addr = 0
+ push(chr(addr))
+ push(chr(line))
+ current_line = instr.lineno
+ current_off = offset
+ offset += instr.size()
+ return ''.join(table)
+
+ def assemble(self):
+ """Build a PyCode object."""
+ # Unless it's interactive, every code object must end in a return.
+ if not self.current_block.have_return:
+ self.use_next_block()
+ if self.add_none_to_final_return:
+ self.load_const(self.space.w_None)
+ self.emit_op(ops.RETURN_VALUE)
+ self.current_block.auto_inserted_return = True
+ # Set the first lineno if it is not already explicitly set.
+ if self.first_lineno == -1:
+ if self.first_block.instructions:
+ self.first_lineno = self.first_block.instructions[0].lineno
+ else:
+ self.first_lineno = 1
+ blocks = self.first_block.post_order()
+ self._resolve_block_targets(blocks)
+ lnotab = self._build_lnotab(blocks)
+ stack_depth = self._stacksize(blocks)
+ consts_w = self._build_consts_array()
+ names = _list_from_dict(self.names)
+ var_names = _list_from_dict(self.var_names)
+ cell_names = _list_from_dict(self.cell_vars)
+ free_names = _list_from_dict(self.free_vars, len(cell_names))
+ flags = self._get_code_flags()
+ # (Only) inherit compilerflags in PyCF_MASK
+ flags |= (self.compile_info.flags & consts.PyCF_MASK)
+ bytecode = ''.join([block.get_code() for block in blocks])
+ return PyCode(self.space,
+ self.argcount,
+ self.kwonlyargcount,
+ len(self.var_names),
+ stack_depth,
+ flags,
+ bytecode,
+ list(consts_w),
+ names,
+ var_names,
+ self.compile_info.filename,
+ self.name,
+ self.first_lineno,
+ lnotab,
+ free_names,
+ cell_names,
+ self.compile_info.hidden_applevel)
+
+
+def _list_from_dict(d, offset=0):
+ result = [None] * len(d)
+ for obj, index in d.iteritems():
+ result[index - offset] = obj
+ return result
+
+
+_static_opcode_stack_effects = {
+ ops.NOP: 0,
+
+ ops.POP_TOP: -1,
+ ops.ROT_TWO: 0,
+ ops.ROT_THREE: 0,
+ ops.DUP_TOP: 1,
+ ops.DUP_TOP_TWO: 2,
+
+ ops.UNARY_POSITIVE: 0,
+ ops.UNARY_NEGATIVE: 0,
+ ops.UNARY_NOT: 0,
+ ops.UNARY_INVERT: 0,
+
+ ops.LIST_APPEND: -1,
+ ops.SET_ADD: -1,
+ ops.MAP_ADD: -2,
+<<<<<<< local
+=======
+ # XXX
+ ops.STORE_MAP: -2,
+>>>>>>> other
+
+ ops.BINARY_POWER: -1,
+ ops.BINARY_MULTIPLY: -1,
+ ops.BINARY_MODULO: -1,
+ ops.BINARY_ADD: -1,
+ ops.BINARY_SUBTRACT: -1,
+ ops.BINARY_SUBSCR: -1,
+ ops.BINARY_FLOOR_DIVIDE: -1,
+ ops.BINARY_TRUE_DIVIDE: -1,
+ ops.BINARY_MATRIX_MULTIPLY: -1,
+ ops.BINARY_LSHIFT: -1,
+ ops.BINARY_RSHIFT: -1,
+ ops.BINARY_AND: -1,
+ ops.BINARY_OR: -1,
+ ops.BINARY_XOR: -1,
+
+ ops.INPLACE_FLOOR_DIVIDE: -1,
+ ops.INPLACE_TRUE_DIVIDE: -1,
+ ops.INPLACE_ADD: -1,
+ ops.INPLACE_SUBTRACT: -1,
+ ops.INPLACE_MULTIPLY: -1,
+ ops.INPLACE_MODULO: -1,
+ ops.INPLACE_POWER: -1,
+ ops.INPLACE_MATRIX_MULTIPLY: -1,
+ ops.INPLACE_LSHIFT: -1,
+ ops.INPLACE_RSHIFT: -1,
+ ops.INPLACE_AND: -1,
+ ops.INPLACE_OR: -1,
+ ops.INPLACE_XOR: -1,
+
+ ops.STORE_SUBSCR: -3,
+ ops.DELETE_SUBSCR: -2,
+
+ ops.GET_ITER: 0,
+ ops.FOR_ITER: 1,
+ ops.BREAK_LOOP: 0,
+ ops.CONTINUE_LOOP: 0,
+ ops.SETUP_LOOP: 0,
+
+ ops.PRINT_EXPR: -1,
+
+<<<<<<< local
+ ops.WITH_CLEANUP_START: -1,
+ ops.WITH_CLEANUP_FINISH: -1, # XXX Sometimes more
+=======
+ # TODO
+ ops.WITH_CLEANUP: -1,
+>>>>>>> other
+ ops.LOAD_BUILD_CLASS: 1,
+<<<<<<< local
+=======
+ # TODO
+ ops.STORE_LOCALS: -1,
+>>>>>>> other
+ ops.POP_BLOCK: 0,
+ ops.POP_EXCEPT: -1,
+ ops.END_FINALLY: -4, # assume always 4: we pretend that SETUP_FINALLY
+ # pushes 4. In truth, it would only push 1 and
+ # the corresponding END_FINALLY only pops 1.
+ ops.SETUP_WITH: 1,
+ ops.SETUP_FINALLY: 0,
+ ops.SETUP_EXCEPT: 0,
+
+ ops.RETURN_VALUE: -1,
+ ops.YIELD_VALUE: 0,
+ ops.YIELD_FROM: -1,
+ ops.COMPARE_OP: -1,
+
+ # TODO
+ ops.LOOKUP_METHOD: 1,
+
+ ops.LOAD_NAME: 1,
+ ops.STORE_NAME: -1,
+ ops.DELETE_NAME: 0,
+
+ ops.LOAD_FAST: 1,
+ ops.STORE_FAST: -1,
+ ops.DELETE_FAST: 0,
+
+ ops.LOAD_ATTR: 0,
+ ops.STORE_ATTR: -2,
+ ops.DELETE_ATTR: -1,
+
+ ops.LOAD_GLOBAL: 1,
+ ops.STORE_GLOBAL: -1,
+ ops.DELETE_GLOBAL: 0,
+ ops.DELETE_DEREF: 0,
+
+ ops.LOAD_CLOSURE: 1,
+ ops.LOAD_DEREF: 1,
+ ops.STORE_DEREF: -1,
+ ops.DELETE_DEREF: 0,
+
+ ops.LOAD_CONST: 1,
+
+ ops.IMPORT_STAR: -1,
+ ops.IMPORT_NAME: -1,
+ ops.IMPORT_FROM: 1,
+
+ ops.JUMP_FORWARD: 0,
+ ops.JUMP_ABSOLUTE: 0,
+ ops.JUMP_IF_TRUE_OR_POP: 0,
+ ops.JUMP_IF_FALSE_OR_POP: 0,
+ ops.POP_JUMP_IF_TRUE: -1,
+ ops.POP_JUMP_IF_FALSE: -1,
+ # TODO
+ ops.JUMP_IF_NOT_DEBUG: 0,
+
+ # TODO
+ ops.BUILD_LIST_FROM_ARG: 1,
+}
+
+
+def _compute_UNPACK_SEQUENCE(arg):
+ return arg - 1
+
+def _compute_UNPACK_EX(arg):
+ return (arg & 0xFF) + (arg >> 8)
+
+def _compute_BUILD_TUPLE(arg):
+ return 1 - arg
+
+def _compute_BUILD_LIST(arg):
+ return 1 - arg
+
+def _compute_BUILD_SET(arg):
+ return 1 - arg
+
+def _compute_BUILD_MAP(arg):
+ return 1 - 2 * arg
+
+def _compute_BUILD_MAP_UNPACK(arg):
+ return 1 - arg
+
+def _compute_MAKE_CLOSURE(arg):
+ return -2 - _num_args(arg) - ((arg >> 16) & 0xFFFF)
+
+def _compute_MAKE_FUNCTION(arg):
+ return -1 - _num_args(arg) - ((arg >> 16) & 0xFFFF)
+
+def _compute_BUILD_SLICE(arg):
+ if arg == 3:
+ return -2
+ else:
+ return -1
+
+def _compute_RAISE_VARARGS(arg):
+ return -arg
+
+def _num_args(oparg):
+ return (oparg % 256) + 2 * ((oparg // 256) % 256)
+
+def _compute_CALL_FUNCTION(arg):
+ return -_num_args(arg)
+
+def _compute_CALL_FUNCTION_VAR(arg):
+ return -_num_args(arg) - 1
+
+def _compute_CALL_FUNCTION_KW(arg):
+ return -_num_args(arg) - 1
+
+def _compute_CALL_FUNCTION_VAR_KW(arg):
+ return -_num_args(arg) - 2
+
+def _compute_CALL_METHOD(arg):
+ return -_num_args(arg) - 1
+
+
+_stack_effect_computers = {}
+for name, func in globals().items():
+ if name.startswith("_compute_"):
+ func._always_inline_ = True
+ _stack_effect_computers[getattr(ops, name[9:])] = func
+for op, value in _static_opcode_stack_effects.iteritems():
+ def func(arg, _value=value):
+ return _value
+ func._always_inline_ = True
+ _stack_effect_computers[op] = func
+del name, func, op, value
+
+
+def _opcode_stack_effect(op, arg):
+ """Return the stack effect of a opcode an its argument."""
+ if we_are_translated():
+ for possible_op in ops.unrolling_opcode_descs:
+ # EXTENDED_ARG should never get in here.
+ if possible_op.index == ops.EXTENDED_ARG:
+ continue
+ if op == possible_op.index:
+ return _stack_effect_computers[possible_op.index](arg)
+ else:
+ raise AssertionError("unknown opcode: %s" % (op,))
+ else:
+ try:
+ return _static_opcode_stack_effects[op]
+ except KeyError:
+ try:
+ return _stack_effect_computers[op](arg)
+ except KeyError:
+ raise KeyError("Unknown stack effect for %s (%s)" %
+ (ops.opname[op], op))
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -2970,6 +2970,8 @@
return 11
if space.isinstance_w(w_node, get(space).w_FloorDiv):
return 12
+ if space.isinstance_w(w_node, get(space).w_MatMul):
+ return 13
raise oefmt(space.w_TypeError,
"Expected operator node, got %T", w_node)
State.ast_type('operator', 'AST', None)
@@ -3034,6 +3036,11 @@
return space.call_function(get(space).w_FloorDiv)
State.ast_type('FloorDiv', 'operator', None)
+class _MatMul(operator):
+ def to_object(self, space):
+ return space.call_function(get(space).w_MatMul)
+State.ast_type('MatMul', 'operator', None)
+
Add = 1
Sub = 2
Mult = 3
@@ -3046,6 +3053,7 @@
BitXor = 10
BitAnd = 11
FloorDiv = 12
+MatMul = 13
operator_to_class = [
_Add,
@@ -3060,6 +3068,7 @@
_BitXor,
_BitAnd,
_FloorDiv,
+ _MatMul,
]
class unaryop(AST):
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -17,6 +17,7 @@
'/=' : ast.Div,
'//=' : ast.FloorDiv,
'%=' : ast.Mod,
+ '@=' : ast.MatMul,
'<<=' : ast.LShift,
'>>=' : ast.RShift,
'&=' : ast.BitAnd,
@@ -37,7 +38,8 @@
tokens.STAR : ast.Mult,
tokens.SLASH : ast.Div,
tokens.DOUBLESLASH : ast.FloorDiv,
- tokens.PERCENT : ast.Mod
+ tokens.PERCENT : ast.Mod,
+ tokens.AT : ast.MatMul
})
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -65,7 +65,8 @@
ast.BitOr: ops.BINARY_OR,
ast.BitAnd: ops.BINARY_AND,
ast.BitXor: ops.BINARY_XOR,
- ast.FloorDiv: ops.BINARY_FLOOR_DIVIDE
+ ast.FloorDiv: ops.BINARY_FLOOR_DIVIDE,
+ ast.MatMul: ops.BINARY_MATRIX_MULTIPLY
})
inplace_operations = misc.dict_to_switch({
@@ -80,7 +81,8 @@
ast.BitOr: ops.INPLACE_OR,
ast.BitAnd: ops.INPLACE_AND,
ast.BitXor: ops.INPLACE_XOR,
- ast.FloorDiv: ops.INPLACE_FLOOR_DIVIDE
+ ast.FloorDiv: ops.INPLACE_FLOOR_DIVIDE,
+ ast.MatMul: ops.INPLACE_MATRIX_MULTIPLY
})
compare_operations = misc.dict_to_switch({
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -134,6 +134,7 @@
ast.BitOr : _binary_fold("or_"),
ast.BitXor : _binary_fold("xor"),
ast.BitAnd : _binary_fold("and_"),
+ ast.MatMul : _binary_fold("matmul"),
}
unrolling_binary_folders = unrolling_iterable(binary_folders.items())
diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl
--- a/pypy/interpreter/astcompiler/tools/Python.asdl
+++ b/pypy/interpreter/astcompiler/tools/Python.asdl
@@ -95,7 +95,7 @@
boolop = And | Or
operator = Add | Sub | Mult | Div | Mod | Pow | LShift
- | RShift | BitOr | BitXor | BitAnd | FloorDiv
+ | RShift | BitOr | BitXor | BitAnd | FloorDiv | MatMul
unaryop = Invert | Not | UAdd | USub
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1891,6 +1891,8 @@
('set', 'set', 3, ['__set__']),
('delete', 'delete', 2, ['__delete__']),
('userdel', 'del', 1, ['__del__']),
+ ('matmul', '@', 2, ['__matmul__', '__rmatmul__']),
+ ('inplace_matmul', '@=', 2, ['__imatmul__']),
]
ObjSpace.BuiltinModuleTable = [
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -228,6 +228,8 @@
self.BINARY_AND(oparg, next_instr)
elif opcode == opcodedesc.BINARY_FLOOR_DIVIDE.index:
self.BINARY_FLOOR_DIVIDE(oparg, next_instr)
+ elif opcode == opcodedesc.BINARY_MATRIX_MULTIPLY.index:
+ self.BINARY_MATRIX_MULTIPLY(oparg, next_instr)
elif opcode == opcodedesc.BINARY_LSHIFT.index:
self.BINARY_LSHIFT(oparg, next_instr)
elif opcode == opcodedesc.BINARY_MODULO.index:
@@ -571,6 +573,7 @@
BINARY_DIVIDE = binaryoperation("div")
# XXX BINARY_DIVIDE must fall back to BINARY_TRUE_DIVIDE with -Qnew
BINARY_MODULO = binaryoperation("mod")
+ BINARY_MATRIX_MULTIPLY = binaryoperation("matmul")
BINARY_ADD = binaryoperation("add")
BINARY_SUBTRACT = binaryoperation("sub")
BINARY_SUBSCR = binaryoperation("getitem")
@@ -589,9 +592,11 @@
INPLACE_MULTIPLY = binaryoperation("inplace_mul")
INPLACE_TRUE_DIVIDE = binaryoperation("inplace_truediv")
INPLACE_FLOOR_DIVIDE = binaryoperation("inplace_floordiv")
+ INPLACE_FLOOR_DIVIDE = binaryoperation("inplace_matmul")
INPLACE_DIVIDE = binaryoperation("inplace_div")
# XXX INPLACE_DIVIDE must fall back to INPLACE_TRUE_DIVIDE with -Qnew
INPLACE_MODULO = binaryoperation("inplace_mod")
+ INPLACE_MATRIX_MULTIPLY = binaryoperation("inplace_matmul")
INPLACE_ADD = binaryoperation("inplace_add")
INPLACE_SUBTRACT = binaryoperation("inplace_sub")
INPLACE_LSHIFT = binaryoperation("inplace_lshift")
diff --git a/pypy/interpreter/pyparser/data/Grammar3.5 b/pypy/interpreter/pyparser/data/Grammar3.5
new file mode 100644
--- /dev/null
+++ b/pypy/interpreter/pyparser/data/Grammar3.5
@@ -0,0 +1,159 @@
+# Grammar for Python
+
+# Note: Changing the grammar specified in this file will most likely
+# require corresponding changes in the parser module
+# (../Modules/parsermodule.c). If you can't make the changes to
+# that module yourself, please co-ordinate the required changes
+# with someone who can; ask around on python-dev for help. Fred
+# Drake will probably be listening there.
+
+# NOTE WELL: You should also follow all the steps listed at
+# https://docs.python.org/devguide/grammar.html
+
+# Start symbols for the grammar:
+# single_input is a single interactive statement;
+# file_input is a module or sequence of commands read from an input file;
+# eval_input is the input for the eval() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+file_input: (NEWLINE | stmt)* ENDMARKER
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef)
+# | async_funcdef)
+
+# async_funcdef: ASYNC funcdef
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+
+parameters: '(' [typedargslist] ')'
+typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [','
+ ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]]
+ | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
+tfpdef: NAME [':' test]
+varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [','
+ ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]]
+ | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
+vfpdef: NAME
+
+stmt: simple_stmt | compound_stmt
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
+ import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
+ ('=' (yield_expr|testlist_star_expr))*)
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+ '<<=' | '>>=' | '**=' | '//=')
+# For normal assignments, additional restrictions enforced by the interpreter
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
+import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
+ 'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: 'global' NAME (',' NAME)*
+nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
+# | async_stmt
+# async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
+if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+ ((except_clause ':' suite)+
+ ['else' ':' suite]
+ ['finally' ':' suite] |
+ 'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)* ':' suite
+with_item: test ['as' expr]
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test ['as' NAME]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+test: or_test ['if' or_test 'else' test] | lambdef
+test_nocond: or_test | lambdef_nocond
+lambdef: 'lambda' [varargslist] ':' test
+lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+# <> isn't actually a valid comparison operator in Python. It's here for the
+# sake of a __future__ import described in PEP 401 (which really works :-)
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+# power: atom_expr ['**' factor]
+power: atom trailer* ['**' factor]
+# atom_expr: [AWAIT] atom trailer*
+atom: ('(' [yield_expr|testlist_comp] ')' |
+ '[' [testlist_comp] ']' |
+ '{' [dictorsetmaker] '}' |
+ NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False')
+testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
+ (test (comp_for | (',' test)* [','])) )
+#dictorsetmaker: ( ((test ':' test | '**' expr)
+# (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+# ((test | star_expr)
+# (comp_for | (',' (test | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: (argument ',')* (argument [',']
+ |'*' test (',' argument)* [',' '**' test]
+ |'**' test)
+#arglist: argument (',' argument)* [',']
+
+# The reason that keywords are test nodes instead of NAME is that using NAME
+# results in an ambiguity. ast.c makes sure it's a NAME.
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
+argument: test [comp_for] | test '=' test # Really [keyword '='] test
+#argument: ( test [comp_for] |
+# test '=' test |
+# '**' test |
+# '*' test )
+
+comp_iter: comp_for | comp_if
+comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_if: 'if' test_nocond [comp_iter]
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist
diff --git a/pypy/interpreter/pyparser/pygram.py b/pypy/interpreter/pyparser/pygram.py
--- a/pypy/interpreter/pyparser/pygram.py
+++ b/pypy/interpreter/pyparser/pygram.py
@@ -9,7 +9,7 @@
def _get_python_grammar():
here = os.path.dirname(__file__)
- fp = open(os.path.join(here, "data", "Grammar3.3"))
+ fp = open(os.path.join(here, "data", "Grammar3.5"))
try:
gram_source = fp.read()
finally:
diff --git a/pypy/interpreter/pyparser/pytoken.py b/pypy/interpreter/pyparser/pytoken.py
--- a/pypy/interpreter/pyparser/pytoken.py
+++ b/pypy/interpreter/pyparser/pytoken.py
@@ -61,6 +61,7 @@
_add_tok('DOUBLESLASH', "//" )
_add_tok('DOUBLESLASHEQUAL',"//=" )
_add_tok('AT', "@" )
+_add_tok('ATEQUAL', "@=" )
_add_tok('RARROW', "->")
_add_tok('ELLIPSIS', "...")
_add_tok('OP')
diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py
--- a/pypy/module/cpyext/number.py
+++ b/pypy/module/cpyext/number.py
@@ -95,6 +95,7 @@
('Xor', 'xor'),
('Or', 'or_'),
('Divmod', 'divmod'),
+ ('MatrixMultiply', 'matmul')
]:
make_numbermethod(name, spacemeth)
if name != 'Divmod':
diff --git a/pypy/module/operator/__init__.py b/pypy/module/operator/__init__.py
--- a/pypy/module/operator/__init__.py
+++ b/pypy/module/operator/__init__.py
@@ -28,7 +28,7 @@
'le', 'lshift', 'lt', 'mod', 'mul',
'ne', 'neg', 'not_', 'or_',
'pos', 'pow', 'rshift', 'setitem',
- 'sub', 'truediv', 'truth', 'xor',
+ 'sub', 'truediv', 'matmul', 'truth', 'xor',
'iadd', 'iand', 'iconcat', 'ifloordiv',
'ilshift', 'imod', 'imul', 'ior', 'ipow',
'irshift', 'isub', 'itruediv', 'ixor', '_length_hint',
@@ -72,6 +72,7 @@
'__sub__' : 'sub',
'__truediv__' : 'truediv',
'__xor__' : 'xor',
+ '__matmul__' : 'matmul',
# in-place
'__iadd__' : 'iadd',
'__iand__' : 'iand',
diff --git a/pypy/module/operator/interp_operator.py b/pypy/module/operator/interp_operator.py
--- a/pypy/module/operator/interp_operator.py
+++ b/pypy/module/operator/interp_operator.py
@@ -143,6 +143,10 @@
'xor(a, b) -- Same as a ^ b.'
return space.xor(w_a, w_b)
+def matmul(space, w_a, w_b):
+ 'matmul(a, b) -- Same as a @ b.'
+ return space.matmul(w_a, w_b)
+
# in-place operations
def iadd(space, w_obj1, w_obj2):
@@ -193,6 +197,10 @@
'ixor(a, b) -- Same as a ^= b.'
return space.inplace_xor(w_a, w_b)
+def imatmul(space, w_a, w_b):
+ 'imatmul(a, b) -- Same as a @= b.'
+ return space.inplace_matmul(w_a, w_b)
+
def iconcat(space, w_obj1, w_obj2):
'iconcat(a, b) -- Same as a += b, for a and b sequences.'
if (space.lookup(w_obj1, '__getitem__') is None or
diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
--- a/pypy/module/sys/version.py
+++ b/pypy/module/sys/version.py
@@ -6,7 +6,7 @@
from pypy.interpreter import gateway
#XXX # the release serial 42 is not in range(16)
-CPYTHON_VERSION = (3, 3, 5, "final", 0)
+CPYTHON_VERSION = (3, 5, 1, "final", 0)
#XXX # sync CPYTHON_VERSION with patchlevel.h, package.py
CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h
diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py
--- a/pypy/objspace/std/intobject.py
+++ b/pypy/objspace/std/intobject.py
@@ -275,6 +275,7 @@
descr_add, descr_radd = _abstract_binop('add')
descr_sub, descr_rsub = _abstract_binop('sub')
descr_mul, descr_rmul = _abstract_binop('mul')
+ descr_matmul, descr_rmatmul = _abstract_binop('matmul')
descr_and, descr_rand = _abstract_binop('and')
descr_or, descr_ror = _abstract_binop('or')
diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py
--- a/pypy/objspace/std/util.py
+++ b/pypy/objspace/std/util.py
@@ -15,7 +15,7 @@
BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>',
'xor': '^'}
BINARY_OPS = dict(add='+', div='/', floordiv='//', mod='%', mul='*', sub='-',
- truediv='/', **BINARY_BITWISE_OPS)
+ truediv='/', matmul='@', **BINARY_BITWISE_OPS)
COMMUTATIVE_OPS = ('add', 'mul', 'and', 'or', 'xor')
diff --git a/pypy/tool/opcode3.py b/pypy/tool/opcode3.py
--- a/pypy/tool/opcode3.py
+++ b/pypy/tool/opcode3.py
@@ -5,6 +5,7 @@
"Backported" from Python 3 to Python 2 land - an exact copy of lib-python/3/opcode.py
"""
+
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
From pypy.commits at gmail.com Wed May 4 15:57:50 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 12:57:50 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: test fix
Message-ID: <572a543e.cb9a1c0a.50386.3429@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84198:70d8ebd681b1
Date: 2016-05-04 21:54 +0200
http://bitbucket.org/pypy/pypy/changeset/70d8ebd681b1/
Log: test fix
diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py
--- a/rpython/memory/gc/test/test_direct.py
+++ b/rpython/memory/gc/test/test_direct.py
@@ -8,7 +8,7 @@
import py
from rpython.rtyper.lltypesystem import lltype, llmemory
-from rpython.memory.gctypelayout import TypeLayoutBuilder
+from rpython.memory.gctypelayout import TypeLayoutBuilder, FIN_HANDLER_ARRAY
from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int
from rpython.memory.gc import minimark, incminimark
from rpython.memory.gctypelayout import zero_gc_pointers_inside, zero_gc_pointers
@@ -84,7 +84,9 @@
self.gc.set_root_walker(self.rootwalker)
self.layoutbuilder = TypeLayoutBuilder(self.GCClass)
self.get_type_id = self.layoutbuilder.get_type_id
- self.layoutbuilder.initialize_gc_query_function(self.gc)
+ gcdata = self.layoutbuilder.initialize_gc_query_function(self.gc)
+ ll_handlers = lltype.malloc(FIN_HANDLER_ARRAY, 0, immortal=True)
+ gcdata.finalizer_handlers = llmemory.cast_ptr_to_adr(ll_handlers)
self.gc.setup()
def consider_constant(self, p):
From pypy.commits at gmail.com Wed May 4 17:39:43 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 14:39:43 -0700 (PDT)
Subject: [pypy-commit] pypy default: mention branch,
probably doesn't need a whatsnew entry
Message-ID: <572a6c1f.878d1c0a.5d5f1.148f@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84199:64206eee56b3
Date: 2016-05-04 23:39 +0200
http://bitbucket.org/pypy/pypy/changeset/64206eee56b3/
Log: mention branch, probably doesn't need a whatsnew entry
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -66,3 +66,5 @@
Get the cpyext tests to pass with "-A" (i.e. when tested directly with
CPython).
+
+.. branch: oefmt
From pypy.commits at gmail.com Wed May 4 18:19:24 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Wed, 04 May 2016 15:19:24 -0700 (PDT)
Subject: [pypy-commit] pypy unpacking-cpython-shortcut: Copy CPython's
'optimization': ignore __iter__ etc. for f(**dict_subclass())
Message-ID: <572a756c.d5da1c0a.1323d.607a@mx.google.com>
Author: Devin Jeanpierre
Branch: unpacking-cpython-shortcut
Changeset: r84200:4c464c5704eb
Date: 2016-05-04 15:08 -0700
http://bitbucket.org/pypy/pypy/changeset/4c464c5704eb/
Log: Copy CPython's 'optimization': ignore __iter__ etc. for
f(**dict_subclass())
Super unfamiliar with this code, could be we can clean up the
isinstance check below, or it could be that this is unsafe. :S!
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -688,3 +688,21 @@
def f(x): pass
e = raises(TypeError, "f(**{u'ü' : 19})")
assert "?" in str(e.value)
+
+ def test_starstarargs_dict_subclass(self):
+ def f(**kwargs):
+ return kwargs
+ class DictSubclass(dict):
+ def __iter__(self):
+ yield 'x'
+ # CPython, as an optimization, looks directly into dict internals when
+ # passing one via **kwargs.
+ x =DictSubclass()
+ assert f(**x) == {}
+ x['a'] = 1
+ assert f(**x) == {'a': 1}
+
+ def test_starstarargs_module_dict(self):
+ def f(**kwargs):
+ return kwargs
+ assert f(**globals()) == globals()
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -483,7 +483,7 @@
return None
def view_as_kwargs(self, w_dict):
- if type(w_dict) is W_DictObject:
+ if isinstance(w_dict, W_DictObject):
return w_dict.view_as_kwargs()
return (None, None)
From pypy.commits at gmail.com Wed May 4 19:25:30 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Wed, 04 May 2016 16:25:30 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: fix unicode handling
Message-ID: <572a84ea.171d1c0a.9ac59.0b6c@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84201:d5f860dfb191
Date: 2016-05-04 16:23 -0700
http://bitbucket.org/pypy/pypy/changeset/d5f860dfb191/
Log: fix unicode handling
diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py
--- a/pypy/objspace/std/formatting.py
+++ b/pypy/objspace/std/formatting.py
@@ -324,19 +324,10 @@
def unknown_fmtchar(self):
space = self.space
c = self.fmt[self.fmtpos - 1]
- if do_unicode:
- w_defaultencoding = space.call_function(
- space.sys.get('getdefaultencoding'))
- w_s = space.call_method(space.wrap(c),
- "encode",
- w_defaultencoding,
- space.wrap('replace'))
- s = space.str_w(w_s)
- else:
- s = c
+ w_s = space.wrap(c) if do_unicode else space.wrapbytes(c)
raise oefmt(space.w_ValueError,
- "unsupported format character '%s' (%s) at index %d",
- s, hex(ord(c)), self.fmtpos - 1)
+ "unsupported format character %R (%s) at index %d",
+ w_s, hex(ord(c)), self.fmtpos - 1)
def std_wp(self, r):
length = len(r)
From pypy.commits at gmail.com Wed May 4 19:32:58 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Wed, 04 May 2016 16:32:58 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: backout 25e7ce4956dd -- let's see if it's
still necessary for py3k after the
Message-ID: <572a86aa.0c2e1c0a.d4e63.0b3b@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84202:ba47fac77ffc
Date: 2016-05-04 16:32 -0700
http://bitbucket.org/pypy/pypy/changeset/ba47fac77ffc/
Log: backout 25e7ce4956dd -- let's see if it's still necessary for py3k
after the recent methodcache fixes
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -445,7 +445,7 @@
cached_version_tag = cache.versions[method_hash]
if cached_version_tag is version_tag:
cached_name = cache.names[method_hash]
- if cached_name == name:
+ if cached_name is name:
tup = cache.lookup_where[method_hash]
if space.config.objspace.std.withmethodcachecounter:
cache.hits[name] = cache.hits.get(name, 0) + 1
From pypy.commits at gmail.com Wed May 4 22:51:24 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Wed, 04 May 2016 19:51:24 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: reapply lost 2.7/3.2 workarounds
Message-ID: <572ab52c.8344c20a.2d101.5136@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84204:f31efe7d13cf
Date: 2016-05-04 19:48 -0700
http://bitbucket.org/pypy/pypy/changeset/f31efe7d13cf/
Log: reapply lost 2.7/3.2 workarounds
diff --git a/lib-python/3/test/test_descr.py b/lib-python/3/test/test_descr.py
--- a/lib-python/3/test/test_descr.py
+++ b/lib-python/3/test/test_descr.py
@@ -1782,7 +1782,6 @@
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "__next__" : stop}),
- ("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
@@ -1798,6 +1797,8 @@
("__ceil__", math.ceil, zero, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
+ if not hasattr(sys, 'getsizeof') and support.check_impl_detail():
+ specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
def __getattr__(self, attr, test=self):
@@ -1960,7 +1961,8 @@
except TypeError as msg:
self.assertIn("weak reference", str(msg))
else:
- self.fail("weakref.ref(no) should be illegal")
+ if support.check_impl_detail(pypy=False):
+ self.fail("weakref.ref(no) should be illegal")
class Weak(object):
__slots__ = ['foo', '__weakref__']
yes = Weak()
@@ -4300,14 +4302,10 @@
self.assertNotEqual(l.__add__, [5].__add__)
self.assertNotEqual(l.__add__, l.__mul__)
self.assertEqual(l.__add__.__name__, '__add__')
- if hasattr(l.__add__, '__self__'):
+ self.assertIs(l.__add__.__self__, l)
+ if hasattr(l.__add__, '__objclass__'):
# CPython
- self.assertIs(l.__add__.__self__, l)
self.assertIs(l.__add__.__objclass__, list)
- else:
- # Python implementations where [].__add__ is a normal bound method
- self.assertIs(l.__add__.im_self, l)
- self.assertIs(l.__add__.im_class, list)
self.assertEqual(l.__add__.__doc__, list.__add__.__doc__)
try:
hash(l.__add__)
From pypy.commits at gmail.com Wed May 4 22:51:26 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Wed, 04 May 2016 19:51:26 -0700 (PDT)
Subject: [pypy-commit] pypy default: __length_hint__ now supported,
sync w/ py3k
Message-ID: <572ab52e.01341c0a.82308.2e5e@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84205:72a9e8ec895d
Date: 2016-05-04 19:49 -0700
http://bitbucket.org/pypy/pypy/changeset/72a9e8ec895d/
Log: __length_hint__ now supported, sync w/ py3k
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "next" : stop}),
- ("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
("__format__", format, format_impl, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
+ if not hasattr(sys, 'getsizeof') and test_support.check_impl_detail():
+ specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
raise MyException
for name, runner, meth_impl, ok, env in specials:
- if name == '__length_hint__' or name == '__sizeof__':
- if not test_support.check_impl_detail():
- continue
-
class X(Checker):
pass
for attr, obj in env.iteritems():
From pypy.commits at gmail.com Wed May 4 22:51:22 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Wed, 04 May 2016 19:51:22 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: add __dict__ to class/staticmethod
Message-ID: <572ab52a.08121c0a.1dacd.2f80@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84203:6cac09131559
Date: 2016-05-04 18:29 -0700
http://bitbucket.org/pypy/pypy/changeset/6cac09131559/
Log: add __dict__ to class/staticmethod
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -593,6 +593,19 @@
def __init__(self, w_function):
self.w_function = w_function
+ self.w_dict = None
+
+ def getdict(self, space):
+ if self.w_dict is None:
+ self.w_dict = space.newdict(instance=True)
+ return self.w_dict
+
+ def setdict(self, space, w_dict):
+ if not space.isinstance_w(w_dict, space.w_dict):
+ raise oefmt(space.w_TypeError,
+ "__dict__ must be set to a dictionary, not a %T",
+ w_dict)
+ self.w_dict = w_dict
def descr_staticmethod_get(self, w_obj, w_cls=None):
"""staticmethod(x).__get__(obj[, type]) -> x"""
@@ -613,6 +626,19 @@
def __init__(self, w_function):
self.w_function = w_function
+ self.w_dict = None
+
+ def getdict(self, space):
+ if self.w_dict is None:
+ self.w_dict = space.newdict(instance=True)
+ return self.w_dict
+
+ def setdict(self, space, w_dict):
+ if not space.isinstance_w(w_dict, space.w_dict):
+ raise oefmt(space.w_TypeError,
+ "__dict__ must be set to a dictionary, not a %T",
+ w_dict)
+ self.w_dict = w_dict
def descr_classmethod_get(self, space, w_obj, w_klass=None):
if space.is_none(w_klass):
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -739,6 +739,8 @@
__new__ = interp2app(StaticMethod.descr_staticmethod__new__.im_func),
__func__= interp_attrproperty_w('w_function', cls=StaticMethod),
__isabstractmethod__ = GetSetProperty(StaticMethod.descr_isabstract),
+ __dict__ = GetSetProperty(descr_get_dict, descr_set_dict,
+ cls=StaticMethod),
)
ClassMethod.typedef = TypeDef(
@@ -747,6 +749,7 @@
__get__ = interp2app(ClassMethod.descr_classmethod_get),
__func__= interp_attrproperty_w('w_function', cls=ClassMethod),
__isabstractmethod__ = GetSetProperty(ClassMethod.descr_isabstract),
+ __dict__ = GetSetProperty(descr_get_dict, descr_set_dict, cls=ClassMethod),
__doc__ = """classmethod(function) -> class method
Convert a function to be a class method.
diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py
--- a/pypy/module/__builtin__/test/test_descriptor.py
+++ b/pypy/module/__builtin__/test/test_descriptor.py
@@ -14,6 +14,17 @@
assert d.f("abc", "def") == "abcdef"
assert D.f("abc", "def") == "abcdef"
+ def test_staticmethod_dict(self):
+ sm = staticmethod(None)
+ assert sm.__dict__ == {}
+ sm.x = 42
+ assert sm.x == 42
+ assert sm.__dict__ == {"x" : 42}
+ del sm.x
+ assert not hasattr(sm, "x")
+ raises(TypeError, setattr, sm, '__dict__', [])
+ raises((AttributeError, TypeError), delattr, sm, '__dict__')
+
def test_staticmethod_subclass(self):
class Static(staticmethod):
pass
@@ -266,6 +277,20 @@
meth = classmethod(1).__get__(1)
raises(TypeError, meth)
+ def test_classmethod_dict(self):
+ cm = classmethod(None)
+ assert cm.__dict__ == {}
+ cm.x = 42
+ assert cm.x == 42
+ assert cm.__dict__ == {"x": 42}
+ del cm.x
+ assert not hasattr(cm, "x")
+ cm.x = 42
+ cm.__dict__ = {}
+ assert not hasattr(cm, "x")
+ raises(TypeError, setattr, cm, '__dict__', [])
+ raises((AttributeError, TypeError), delattr, cm, '__dict__')
+
def test_super_thisclass(self):
class A(object):
pass
From pypy.commits at gmail.com Wed May 4 22:59:39 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Wed, 04 May 2016 19:59:39 -0700 (PDT)
Subject: [pypy-commit] pypy default: oops
Message-ID: <572ab71b.508e1c0a.73b66.309f@mx.google.com>
Author: Philip Jenvey
Branch:
Changeset: r84206:ff72a5d6a0cc
Date: 2016-05-04 19:58 -0700
http://bitbucket.org/pypy/pypy/changeset/ff72a5d6a0cc/
Log: oops
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1746,7 +1746,7 @@
("__format__", format, format_impl, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
- if not hasattr(sys, 'getsizeof') and test_support.check_impl_detail():
+ if test_support.check_impl_detail():
specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
From pypy.commits at gmail.com Wed May 4 22:59:41 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Wed, 04 May 2016 19:59:41 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: oops
Message-ID: <572ab71d.a16ec20a.8d30e.4577@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84207:aba8656cdef4
Date: 2016-05-04 19:58 -0700
http://bitbucket.org/pypy/pypy/changeset/aba8656cdef4/
Log: oops
diff --git a/lib-python/3/test/test_descr.py b/lib-python/3/test/test_descr.py
--- a/lib-python/3/test/test_descr.py
+++ b/lib-python/3/test/test_descr.py
@@ -1797,7 +1797,7 @@
("__ceil__", math.ceil, zero, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
- if not hasattr(sys, 'getsizeof') and support.check_impl_detail():
+ if support.check_impl_detail():
specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
From pypy.commits at gmail.com Thu May 5 02:55:30 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 23:55:30 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: Another passing test
Message-ID: <572aee62.0e711c0a.a9c4f.6751@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84208:aa2b2343b111
Date: 2016-05-05 08:54 +0200
http://bitbucket.org/pypy/pypy/changeset/aa2b2343b111/
Log: Another passing test
diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py
--- a/rpython/memory/test/gc_test_base.py
+++ b/rpython/memory/test/gc_test_base.py
@@ -207,6 +207,43 @@
res = self.interpret(f, [5])
assert res == 6
+ def test_finalizer_delaying_next_dead(self):
+ class B(object):
+ pass
+ b = B()
+ b.nextid = 0
+ class A(object):
+ def __init__(self):
+ self.id = b.nextid
+ b.nextid += 1
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ b.triggered += 1
+ fq = FQ()
+ def g(): # indirection to avoid leaking the result for too long
+ A()
+ def f(x):
+ b.triggered = 0
+ g()
+ i = 0
+ while i < x:
+ i += 1
+ g()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ assert b.triggered > 0
+ g(); g() # two more
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ num_deleted = 0
+ while fq.next_dead() is not None:
+ num_deleted += 1
+ return num_deleted + 1000 * b.triggered
+ res = self.interpret(f, [5])
+ assert res in (3008, 4008, 5008), "res == %d" % (res,)
+
def test_finalizer_calls_malloc(self):
class B(object):
pass
From pypy.commits at gmail.com Thu May 5 02:55:32 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 23:55:32 -0700 (PDT)
Subject: [pypy-commit] pypy gc-del-3: ready to merge
Message-ID: <572aee64.a553c20a.33b82.ffff80b7@mx.google.com>
Author: Armin Rigo
Branch: gc-del-3
Changeset: r84209:c983268ba364
Date: 2016-05-05 08:54 +0200
http://bitbucket.org/pypy/pypy/changeset/c983268ba364/
Log: ready to merge
From pypy.commits at gmail.com Thu May 5 02:55:34 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 23:55:34 -0700 (PDT)
Subject: [pypy-commit] pypy default: hg merge gc-del-3
Message-ID: <572aee66.8344c20a.2d101.ffff895a@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84210:958642dc2cb6
Date: 2016-05-05 08:55 +0200
http://bitbucket.org/pypy/pypy/changeset/958642dc2cb6/
Log: hg merge gc-del-3
Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-
order.rst. It is a more flexible way to make RPython finalizers.
This branch does not use it in pypy/, it just adds the new way while
keeping the old one valid too.
diff too long, truncating to 2000 out of 2685 lines
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,123 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``. To
+make it possible, the RPython interface is now the following one (from
+May 2016):
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``. These are called
+ immediately by the GC when the last reference to the object goes
+ away, like in CPython. However, the long-term goal is that all
+ ``__del__()`` methods should only contain simple enough code. If
+ they do, we call them "destructors". They can't use operations that
+ would resurrect the object, for example. Use the decorator
+ ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` methods that do not pass the destructor test
+ are supported for backward compatibility, but deprecated. The rest
+ of this document assumes that ``__del__()`` are all destructors.
+
+* For any more advanced usage --- in particular for any app-level
+ object with a __del__ --- we don't use the RPython-level
+ ``__del__()`` method. Instead we use
+ ``rgc.FinalizerController.register_finalizer()``. This allows us to
+ attach a finalizer method to the object, giving more control over
+ the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer. A finalizer runs earlier, and in topological
+order; care must be taken that the object might still be reachable at
+this point if we're clever enough. A destructor on the other hand runs
+last; nothing can be done with the object any more.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when there is no more reference to an object. Intended for
+objects that just need to free a block of raw memory or close a file.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it. These restrictions are
+checked. In particular you cannot access fields containing GC objects;
+and if you call an external C function, it must be a "safe" function
+(e.g. not releasing the GIL; use ``releasegil=False`` in
+``rffi.llexternal()``).
+
+If there are several objects with destructors that die during the same
+GC cycle, they are called in a completely random order --- but that
+should not matter because destructors cannot do much anyway.
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerQueue`` class:
+
+* You must give a class-level attribute ``base_class``, which is the
+ base class of all instances with a finalizer. (If you need
+ finalizers on several unrelated classes, you need several unrelated
+ ``FinalizerQueue`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``. At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer. Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and marks them as
+reachable again, as well as all objects they depend on. It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance. Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes). If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly. It
+returns the next queued item, or ``None`` when the queue is empty.
+
+It is allowed in theory to cumulate several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues. This is not tested though.
+
+
+Ordering of finalizers
+----------------------
+
+After a collection, the MiniMark GC should call the finalizers on
*some* of the objects that have one and that have become unreachable.
Basically, if there is a reference chain from an object a to an object b
then it should not call the finalizer for b immediately, but just keep b
alive and try again to call its finalizer after the next collection.
-This basic idea fails when there are cycles. It's not a good idea to
+(Note that this creates rare but annoying issues as soon as the program
+creates chains of objects with finalizers more quickly than the rate at
+which major collections go (which is very slow). In August 2013 we tried
+instead to call all finalizers of all objects found unreachable at a major
+collection. That branch, ``gc-del``, was never merged. It is still
+unclear what the real consequences would be on programs in the wild.)
+
+The basic idea fails in the presence of cycles. It's not a good idea to
keep the objects alive forever or to never call any of the finalizers.
The model we came up with is that in this case, we could just call the
finalizer of one of the objects in the cycle -- but only, of course, if
@@ -33,6 +137,7 @@
detach the finalizer (so that it's not called more than once)
call the finalizer
+
Algorithm
---------
@@ -136,28 +241,8 @@
that doesn't change the state of an object, we don't follow its children
recursively.
-In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode
-the 4 states with a single extra bit in the header:
-
- ===== ============= ======== ====================
- state is_forwarded? bit set? bit set in the copy?
- ===== ============= ======== ====================
- 0 no no n/a
- 1 no yes n/a
- 2 yes yes yes
- 3 yes whatever no
- ===== ============= ======== ====================
-
-So the loop above that does the transition from state 1 to state 2 is
-really just a copy(x) followed by scan_copied(). We must also clear the
-bit in the copy at the end, to clean up before the next collection
-(which means recursively bumping the state from 2 to 3 in the final
-loop).
-
-In the MiniMark GC, the objects don't move (apart from when they are
-copied out of the nursery), but we use the flag GCFLAG_VISITED to mark
-objects that survive, so we can also have a single extra bit for
-finalizers:
+In practice, in the MiniMark GC, we can encode
+the 4 states with a combination of two bits in the header:
===== ============== ============================
state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING
@@ -167,3 +252,8 @@
2 yes yes
3 yes no
===== ============== ============================
+
+So the loop above that does the transition from state 1 to state 2 is
+really just a recursive visit. We must also clear the
+FINALIZATION_ORDERING bit at the end (state 2 to state 3) to clean up
+before the next collection.
diff --git a/rpython/doc/rpython.rst b/rpython/doc/rpython.rst
--- a/rpython/doc/rpython.rst
+++ b/rpython/doc/rpython.rst
@@ -191,6 +191,12 @@
``__setitem__`` for slicing isn't supported. Additionally, using negative
 indices for slicing is still not supported, even when using ``__getslice__``.
+ Note that the destructor ``__del__`` should only contain `simple
+ operations`__; for any kind of more complex destructor, consider
+ using instead ``rpython.rlib.rgc.FinalizerQueue``.
+
+.. __: garbage_collection.html
+
This layout makes the number of types to take care about quite limited.
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -6,6 +6,7 @@
from rpython.memory.support import get_address_stack, get_address_deque
from rpython.memory.support import AddressDict, null_address_dict
from rpython.rtyper.lltypesystem.llmemory import NULL, raw_malloc_usage
+from rpython.rtyper.annlowlevel import cast_adr_to_nongc_instance
TYPEID_MAP = lltype.GcStruct('TYPEID_MAP', ('count', lltype.Signed),
('size', lltype.Signed),
@@ -36,8 +37,15 @@
def setup(self):
# all runtime mutable values' setup should happen here
# and in its overriden versions! for the benefit of test_transformed_gc
- self.finalizer_lock_count = 0
- self.run_finalizers = self.AddressDeque()
+ self.finalizer_lock = False
+ self.run_old_style_finalizers = self.AddressDeque()
+
+ def mark_finalizer_to_run(self, fq_index, obj):
+ if fq_index == -1: # backward compatibility with old-style finalizer
+ self.run_old_style_finalizers.append(obj)
+ return
+ handlers = self.finalizer_handlers()
+ self._adr2deque(handlers[fq_index].deque).append(obj)
def post_setup(self):
# More stuff that needs to be initialized when the GC is already
@@ -60,8 +68,9 @@
def set_query_functions(self, is_varsize, has_gcptr_in_varsize,
is_gcarrayofgcptr,
- getfinalizer,
- getlightfinalizer,
+ finalizer_handlers,
+ destructor_or_custom_trace,
+ is_old_style_finalizer,
offsets_to_gc_pointers,
fixed_size, varsize_item_sizes,
varsize_offset_to_variable_part,
@@ -74,8 +83,9 @@
fast_path_tracing,
has_gcptr,
cannot_pin):
- self.getfinalizer = getfinalizer
- self.getlightfinalizer = getlightfinalizer
+ self.finalizer_handlers = finalizer_handlers
+ self.destructor_or_custom_trace = destructor_or_custom_trace
+ self.is_old_style_finalizer = is_old_style_finalizer
self.is_varsize = is_varsize
self.has_gcptr_in_varsize = has_gcptr_in_varsize
self.is_gcarrayofgcptr = is_gcarrayofgcptr
@@ -136,8 +146,10 @@
the four malloc_[fixed,var]size[_clear]() functions.
"""
size = self.fixed_size(typeid)
- needs_finalizer = bool(self.getfinalizer(typeid))
- finalizer_is_light = bool(self.getlightfinalizer(typeid))
+ needs_finalizer = (bool(self.destructor_or_custom_trace(typeid))
+ and not self.has_custom_trace(typeid))
+ finalizer_is_light = (needs_finalizer and
+ not self.is_old_style_finalizer(typeid))
contains_weakptr = self.weakpointer_offset(typeid) >= 0
assert not (needs_finalizer and contains_weakptr)
if self.is_varsize(typeid):
@@ -323,9 +335,44 @@
callback2, attrname = _convert_callback_formats(callback) # :-/
setattr(self, attrname, arg)
self.root_walker.walk_roots(callback2, callback2, callback2)
- self.run_finalizers.foreach(callback, arg)
+ self.enum_pending_finalizers(callback, arg)
enumerate_all_roots._annspecialcase_ = 'specialize:arg(1)'
+ def enum_pending_finalizers(self, callback, arg):
+ self.run_old_style_finalizers.foreach(callback, arg)
+ handlers = self.finalizer_handlers()
+ i = 0
+ while i < len(handlers):
+ self._adr2deque(handlers[i].deque).foreach(callback, arg)
+ i += 1
+ enum_pending_finalizers._annspecialcase_ = 'specialize:arg(1)'
+
+ def _copy_pending_finalizers_deque(self, deque, copy_fn):
+ tmp = self.AddressDeque()
+ while deque.non_empty():
+ obj = deque.popleft()
+ tmp.append(copy_fn(obj))
+ while tmp.non_empty():
+ deque.append(tmp.popleft())
+ tmp.delete()
+
+ def copy_pending_finalizers(self, copy_fn):
+ "NOTE: not very efficient, but only for SemiSpaceGC and subclasses"
+ self._copy_pending_finalizers_deque(
+ self.run_old_style_finalizers, copy_fn)
+ handlers = self.finalizer_handlers()
+ i = 0
+ while i < len(handlers):
+ h = handlers[i]
+ self._copy_pending_finalizers_deque(
+ self._adr2deque(h.deque), copy_fn)
+ i += 1
+
+ def call_destructor(self, obj):
+ destructor = self.destructor_or_custom_trace(self.get_type_id(obj))
+ ll_assert(bool(destructor), "no destructor found")
+ destructor(obj)
+
def debug_check_consistency(self):
"""To use after a collection. If self.DEBUG is set, this
enumerates all roots and traces all objects to check if we didn't
@@ -364,18 +411,25 @@
def debug_check_object(self, obj):
pass
+ def _adr2deque(self, adr):
+ return cast_adr_to_nongc_instance(self.AddressDeque, adr)
+
def execute_finalizers(self):
- self.finalizer_lock_count += 1
+ if self.finalizer_lock:
+ return # the outer invocation of execute_finalizers() will do it
+ self.finalizer_lock = True
try:
- while self.run_finalizers.non_empty():
- if self.finalizer_lock_count > 1:
- # the outer invocation of execute_finalizers() will do it
- break
- obj = self.run_finalizers.popleft()
- finalizer = self.getfinalizer(self.get_type_id(obj))
- finalizer(obj)
+ handlers = self.finalizer_handlers()
+ i = 0
+ while i < len(handlers):
+ if self._adr2deque(handlers[i].deque).non_empty():
+ handlers[i].trigger()
+ i += 1
+ while self.run_old_style_finalizers.non_empty():
+ obj = self.run_old_style_finalizers.popleft()
+ self.call_destructor(obj)
finally:
- self.finalizer_lock_count -= 1
+ self.finalizer_lock = False
class MovingGCBase(GCBase):
diff --git a/rpython/memory/gc/generation.py b/rpython/memory/gc/generation.py
--- a/rpython/memory/gc/generation.py
+++ b/rpython/memory/gc/generation.py
@@ -355,6 +355,7 @@
scan = beginning = self.free
self.collect_oldrefs_to_nursery()
self.collect_roots_in_nursery()
+ self.collect_young_objects_with_finalizers()
scan = self.scan_objects_just_copied_out_of_nursery(scan)
# at this point, all static and old objects have got their
# GCFLAG_NO_YOUNG_PTRS set again by trace_and_drag_out_of_nursery
@@ -422,6 +423,19 @@
if self.is_in_nursery(obj):
root.address[0] = self.copy(obj)
+ def collect_young_objects_with_finalizers(self):
+ # XXX always walk the whole 'objects_with_finalizers' list here
+ new = self.AddressDeque()
+ while self.objects_with_finalizers.non_empty():
+ obj = self.objects_with_finalizers.popleft()
+ fq_nr = self.objects_with_finalizers.popleft()
+ if self.is_in_nursery(obj):
+ obj = self.copy(obj)
+ new.append(obj)
+ new.append(fq_nr)
+ self.objects_with_finalizers.delete()
+ self.objects_with_finalizers = new
+
def scan_objects_just_copied_out_of_nursery(self, scan):
while scan < self.free:
curr = scan + self.size_gc_header()
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -372,10 +372,19 @@
self.gc_state = STATE_SCANNING
#
- # A list of all objects with finalizers (these are never young).
- self.objects_with_finalizers = self.AddressDeque()
- self.young_objects_with_light_finalizers = self.AddressStack()
- self.old_objects_with_light_finalizers = self.AddressStack()
+ # Two lists of all objects with finalizers. Actually they are lists
+ # of pairs (finalization_queue_nr, object). "probably young objects"
+ # are all traced and moved to the "old" list by the next minor
+ # collection.
+ self.probably_young_objects_with_finalizers = self.AddressDeque()
+ self.old_objects_with_finalizers = self.AddressDeque()
+ p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw',
+ track_allocation=False)
+ self.singleaddr = llmemory.cast_ptr_to_adr(p)
+ #
+ # Two lists of all objects with destructors.
+ self.young_objects_with_destructors = self.AddressStack()
+ self.old_objects_with_destructors = self.AddressStack()
#
# Two lists of the objects with weakrefs. No weakref can be an
# old object weakly pointing to a young object: indeed, weakrefs
@@ -609,15 +618,18 @@
# If the object needs a finalizer, ask for a rawmalloc.
# The following check should be constant-folded.
if needs_finalizer and not is_finalizer_light:
+ # old-style finalizers only!
ll_assert(not contains_weakptr,
"'needs_finalizer' and 'contains_weakptr' both specified")
obj = self.external_malloc(typeid, 0, alloc_young=False)
- self.objects_with_finalizers.append(obj)
+ res = llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
+ self.register_finalizer(-1, res)
+ return res
#
# If totalsize is greater than nonlarge_max (which should never be
# the case in practice), ask for a rawmalloc. The following check
# should be constant-folded.
- elif rawtotalsize > self.nonlarge_max:
+ if rawtotalsize > self.nonlarge_max:
ll_assert(not contains_weakptr,
"'contains_weakptr' specified for a large object")
obj = self.external_malloc(typeid, 0, alloc_young=True)
@@ -641,13 +653,12 @@
obj = result + size_gc_header
self.init_gc_object(result, typeid, flags=0)
#
- # If it is a weakref or has a lightweight finalizer, record it
+ # If it is a weakref or has a lightweight destructor, record it
# (checks constant-folded).
- if is_finalizer_light:
- self.young_objects_with_light_finalizers.append(obj)
+ if needs_finalizer:
+ self.young_objects_with_destructors.append(obj)
if contains_weakptr:
self.young_objects_with_weakrefs.append(obj)
- #
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
@@ -851,6 +862,7 @@
collect_and_reserve._dont_inline_ = True
+ # XXX kill alloc_young and make it always True
def external_malloc(self, typeid, length, alloc_young):
"""Allocate a large object using the ArenaCollection or
raw_malloc(), possibly as an object with card marking enabled,
@@ -1566,6 +1578,13 @@
self.header(shadow).tid |= GCFLAG_VISITED
new_shadow_object_dict.setitem(obj, shadow)
+ def register_finalizer(self, fq_index, gcobj):
+ from rpython.rtyper.lltypesystem import rffi
+ obj = llmemory.cast_ptr_to_adr(gcobj)
+ fq_index = rffi.cast(llmemory.Address, fq_index)
+ self.probably_young_objects_with_finalizers.append(obj)
+ self.probably_young_objects_with_finalizers.append(fq_index)
+
# ----------
# Nursery collection
@@ -1633,6 +1652,11 @@
if self.rrc_enabled:
self.rrc_minor_collection_trace()
#
+ # visit the "probably young" objects with finalizers. They
+ # always all survive.
+ if self.probably_young_objects_with_finalizers.non_empty():
+ self.deal_with_young_objects_with_finalizers()
+ #
while True:
# If we are using card marking, do a partial trace of the arrays
# that are flagged with GCFLAG_CARDS_SET.
@@ -1658,8 +1682,8 @@
# weakrefs' targets.
if self.young_objects_with_weakrefs.non_empty():
self.invalidate_young_weakrefs()
- if self.young_objects_with_light_finalizers.non_empty():
- self.deal_with_young_objects_with_finalizers()
+ if self.young_objects_with_destructors.non_empty():
+ self.deal_with_young_objects_with_destructors()
#
# Clear this mapping. Without pinned objects we just clear the dict
# as all objects in the nursery are dragged out of the nursery and, if
@@ -2221,7 +2245,10 @@
if self.rrc_enabled:
self.rrc_major_collection_trace()
#
- if self.objects_with_finalizers.non_empty():
+ ll_assert(not (self.probably_young_objects_with_finalizers
+ .non_empty()),
+ "probably_young_objects_with_finalizers should be empty")
+ if self.old_objects_with_finalizers.non_empty():
self.deal_with_objects_with_finalizers()
elif self.old_objects_with_weakrefs.non_empty():
# Weakref support: clear the weak pointers to dying objects
@@ -2237,9 +2264,9 @@
self.more_objects_to_trace.delete()
#
- # Light finalizers
- if self.old_objects_with_light_finalizers.non_empty():
- self.deal_with_old_objects_with_finalizers()
+ # Destructors
+ if self.old_objects_with_destructors.non_empty():
+ self.deal_with_old_objects_with_destructors()
# objects_to_trace processed fully, can move on to sweeping
self.ac.mass_free_prepare()
self.start_free_rawmalloc_objects()
@@ -2408,7 +2435,7 @@
#
# If we are in an inner collection caused by a call to a finalizer,
# the 'run_finalizers' objects also need to be kept alive.
- self.run_finalizers.foreach(self._collect_obj, None)
+ self.enum_pending_finalizers(self._collect_obj, None)
def enumerate_all_roots(self, callback, arg):
self.prebuilt_root_objects.foreach(callback, arg)
@@ -2573,41 +2600,45 @@
# ----------
# Finalizers
- def deal_with_young_objects_with_finalizers(self):
- """ This is a much simpler version of dealing with finalizers
- and an optimization - we can reasonably assume that those finalizers
- don't do anything fancy and *just* call them. Among other things
+ def deal_with_young_objects_with_destructors(self):
+ """We can reasonably assume that destructors don't do
+ anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
- while self.young_objects_with_light_finalizers.non_empty():
- obj = self.young_objects_with_light_finalizers.pop()
+ while self.young_objects_with_destructors.non_empty():
+ obj = self.young_objects_with_destructors.pop()
if not self.is_forwarded(obj):
- finalizer = self.getlightfinalizer(self.get_type_id(obj))
- ll_assert(bool(finalizer), "no light finalizer found")
- finalizer(obj)
+ self.call_destructor(obj)
else:
obj = self.get_forwarding_address(obj)
- self.old_objects_with_light_finalizers.append(obj)
+ self.old_objects_with_destructors.append(obj)
- def deal_with_old_objects_with_finalizers(self):
- """ This is a much simpler version of dealing with finalizers
- and an optimization - we can reasonably assume that those finalizers
- don't do anything fancy and *just* call them. Among other things
+ def deal_with_old_objects_with_destructors(self):
+ """We can reasonably assume that destructors don't do
+ anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
new_objects = self.AddressStack()
- while self.old_objects_with_light_finalizers.non_empty():
- obj = self.old_objects_with_light_finalizers.pop()
+ while self.old_objects_with_destructors.non_empty():
+ obj = self.old_objects_with_destructors.pop()
if self.header(obj).tid & GCFLAG_VISITED:
# surviving
new_objects.append(obj)
else:
# dying
- finalizer = self.getlightfinalizer(self.get_type_id(obj))
- ll_assert(bool(finalizer), "no light finalizer found")
- finalizer(obj)
- self.old_objects_with_light_finalizers.delete()
- self.old_objects_with_light_finalizers = new_objects
+ self.call_destructor(obj)
+ self.old_objects_with_destructors.delete()
+ self.old_objects_with_destructors = new_objects
+
+ def deal_with_young_objects_with_finalizers(self):
+ while self.probably_young_objects_with_finalizers.non_empty():
+ obj = self.probably_young_objects_with_finalizers.popleft()
+ fq_nr = self.probably_young_objects_with_finalizers.popleft()
+ self.singleaddr.address[0] = obj
+ self._trace_drag_out1(self.singleaddr)
+ obj = self.singleaddr.address[0]
+ self.old_objects_with_finalizers.append(obj)
+ self.old_objects_with_finalizers.append(fq_nr)
def deal_with_objects_with_finalizers(self):
# Walk over list of objects with finalizers.
@@ -2620,14 +2651,17 @@
marked = self.AddressDeque()
pending = self.AddressStack()
self.tmpstack = self.AddressStack()
- while self.objects_with_finalizers.non_empty():
- x = self.objects_with_finalizers.popleft()
+ while self.old_objects_with_finalizers.non_empty():
+ x = self.old_objects_with_finalizers.popleft()
+ fq_nr = self.old_objects_with_finalizers.popleft()
ll_assert(self._finalization_state(x) != 1,
"bad finalization state 1")
if self.header(x).tid & GCFLAG_VISITED:
new_with_finalizer.append(x)
+ new_with_finalizer.append(fq_nr)
continue
marked.append(x)
+ marked.append(fq_nr)
pending.append(x)
while pending.non_empty():
y = pending.pop()
@@ -2647,22 +2681,26 @@
while marked.non_empty():
x = marked.popleft()
+ fq_nr = marked.popleft()
state = self._finalization_state(x)
ll_assert(state >= 2, "unexpected finalization state < 2")
if state == 2:
- self.run_finalizers.append(x)
+ from rpython.rtyper.lltypesystem import rffi
+ fq_index = rffi.cast(lltype.Signed, fq_nr)
+ self.mark_finalizer_to_run(fq_index, x)
# we must also fix the state from 2 to 3 here, otherwise
# we leave the GCFLAG_FINALIZATION_ORDERING bit behind
# which will confuse the next collection
self._recursively_bump_finalization_state_from_2_to_3(x)
else:
new_with_finalizer.append(x)
+ new_with_finalizer.append(fq_nr)
self.tmpstack.delete()
pending.delete()
marked.delete()
- self.objects_with_finalizers.delete()
- self.objects_with_finalizers = new_with_finalizer
+ self.old_objects_with_finalizers.delete()
+ self.old_objects_with_finalizers = new_with_finalizer
def _append_if_nonnull(pointer, stack):
stack.append(pointer.address[0])
@@ -2815,9 +2853,6 @@
self.rrc_o_list_old = self.AddressStack()
self.rrc_p_dict = self.AddressDict() # non-nursery keys only
self.rrc_p_dict_nurs = self.AddressDict() # nursery keys only
- p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw',
- track_allocation=False)
- self.rrc_singleaddr = llmemory.cast_ptr_to_adr(p)
self.rrc_dealloc_trigger_callback = dealloc_trigger_callback
self.rrc_dealloc_pending = self.AddressStack()
self.rrc_enabled = True
@@ -2887,7 +2922,7 @@
self.rrc_p_dict_nurs.delete()
self.rrc_p_dict_nurs = self.AddressDict(length_estimate)
self.rrc_p_list_young.foreach(self._rrc_minor_trace,
- self.rrc_singleaddr)
+ self.singleaddr)
def _rrc_minor_trace(self, pyobject, singleaddr):
from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY
diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py
--- a/rpython/memory/gc/minimark.py
+++ b/rpython/memory/gc/minimark.py
@@ -153,6 +153,8 @@
# ^^^ prebuilt objects may have the flag GCFLAG_HAS_SHADOW;
# then they are one word longer, the extra word storing the hash.
+ _ADDRARRAY = lltype.Array(llmemory.Address, hints={'nolength': True})
+
# During a minor collection, the objects in the nursery that are
# moved outside are changed in-place: their header is replaced with
@@ -309,10 +311,19 @@
self.old_rawmalloced_objects = self.AddressStack()
self.rawmalloced_total_size = r_uint(0)
#
- # A list of all objects with finalizers (these are never young).
- self.objects_with_finalizers = self.AddressDeque()
- self.young_objects_with_light_finalizers = self.AddressStack()
- self.old_objects_with_light_finalizers = self.AddressStack()
+ # Two lists of all objects with finalizers. Actually they are lists
+ # of pairs (finalization_queue_nr, object). "probably young objects"
+ # are all traced and moved to the "old" list by the next minor
+ # collection.
+ self.probably_young_objects_with_finalizers = self.AddressDeque()
+ self.old_objects_with_finalizers = self.AddressDeque()
+ p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw',
+ track_allocation=False)
+ self.singleaddr = llmemory.cast_ptr_to_adr(p)
+ #
+ # Two lists of all objects with destructors.
+ self.young_objects_with_destructors = self.AddressStack()
+ self.old_objects_with_destructors = self.AddressStack()
#
# Two lists of the objects with weakrefs. No weakref can be an
# old object weakly pointing to a young object: indeed, weakrefs
@@ -517,15 +528,18 @@
# If the object needs a finalizer, ask for a rawmalloc.
# The following check should be constant-folded.
if needs_finalizer and not is_finalizer_light:
+ # old-style finalizers only!
ll_assert(not contains_weakptr,
"'needs_finalizer' and 'contains_weakptr' both specified")
obj = self.external_malloc(typeid, 0, alloc_young=False)
- self.objects_with_finalizers.append(obj)
+ res = llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
+ self.register_finalizer(-1, res)
+ return res
#
# If totalsize is greater than nonlarge_max (which should never be
# the case in practice), ask for a rawmalloc. The following check
# should be constant-folded.
- elif rawtotalsize > self.nonlarge_max:
+ if rawtotalsize > self.nonlarge_max:
ll_assert(not contains_weakptr,
"'contains_weakptr' specified for a large object")
obj = self.external_malloc(typeid, 0, alloc_young=True)
@@ -547,14 +561,14 @@
# Build the object.
llarena.arena_reserve(result, totalsize)
obj = result + size_gc_header
- if is_finalizer_light:
- self.young_objects_with_light_finalizers.append(obj)
self.init_gc_object(result, typeid, flags=0)
- #
- # If it is a weakref, record it (check constant-folded).
- if contains_weakptr:
- self.young_objects_with_weakrefs.append(obj)
#
+ # If it is a weakref or has a lightweight destructor, record it
+ # (checks constant-folded).
+ if needs_finalizer:
+ self.young_objects_with_destructors.append(obj)
+ if contains_weakptr:
+ self.young_objects_with_weakrefs.append(obj)
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
@@ -676,6 +690,7 @@
collect_and_reserve._dont_inline_ = True
+ # XXX kill alloc_young and make it always True
def external_malloc(self, typeid, length, alloc_young):
"""Allocate a large object using the ArenaCollection or
raw_malloc(), possibly as an object with card marking enabled,
@@ -1241,6 +1256,13 @@
self.old_objects_with_cards_set.append(dest_addr)
dest_hdr.tid |= GCFLAG_CARDS_SET
+ def register_finalizer(self, fq_index, gcobj):
+ from rpython.rtyper.lltypesystem import rffi
+ obj = llmemory.cast_ptr_to_adr(gcobj)
+ fq_index = rffi.cast(llmemory.Address, fq_index)
+ self.probably_young_objects_with_finalizers.append(obj)
+ self.probably_young_objects_with_finalizers.append(fq_index)
+
# ----------
# Nursery collection
@@ -1264,6 +1286,11 @@
# 'old_objects_pointing_to_young'.
self.collect_roots_in_nursery()
#
+ # visit the "probably young" objects with finalizers. They
+ # always all survive.
+ if self.probably_young_objects_with_finalizers.non_empty():
+ self.deal_with_young_objects_with_finalizers()
+ #
while True:
# If we are using card marking, do a partial trace of the arrays
# that are flagged with GCFLAG_CARDS_SET.
@@ -1288,8 +1315,8 @@
# weakrefs' targets.
if self.young_objects_with_weakrefs.non_empty():
self.invalidate_young_weakrefs()
- if self.young_objects_with_light_finalizers.non_empty():
- self.deal_with_young_objects_with_finalizers()
+ if self.young_objects_with_destructors.non_empty():
+ self.deal_with_young_objects_with_destructors()
#
# Clear this mapping.
if self.nursery_objects_shadows.length() > 0:
@@ -1613,7 +1640,7 @@
# with a finalizer and all objects reachable from there (and also
# moves some objects from 'objects_with_finalizers' to
# 'run_finalizers').
- if self.objects_with_finalizers.non_empty():
+ if self.old_objects_with_finalizers.non_empty():
self.deal_with_objects_with_finalizers()
#
self.objects_to_trace.delete()
@@ -1621,8 +1648,8 @@
# Weakref support: clear the weak pointers to dying objects
if self.old_objects_with_weakrefs.non_empty():
self.invalidate_old_weakrefs()
- if self.old_objects_with_light_finalizers.non_empty():
- self.deal_with_old_objects_with_finalizers()
+ if self.old_objects_with_destructors.non_empty():
+ self.deal_with_old_objects_with_destructors()
#
# Walk all rawmalloced objects and free the ones that don't
@@ -1745,8 +1772,8 @@
#
# If we are in an inner collection caused by a call to a finalizer,
# the 'run_finalizers' objects also need to be kept alive.
- self.run_finalizers.foreach(self._collect_obj,
- self.objects_to_trace)
+ self.enum_pending_finalizers(self._collect_obj,
+ self.objects_to_trace)
def enumerate_all_roots(self, callback, arg):
self.prebuilt_root_objects.foreach(callback, arg)
@@ -1878,41 +1905,45 @@
# ----------
# Finalizers
- def deal_with_young_objects_with_finalizers(self):
- """ This is a much simpler version of dealing with finalizers
- and an optimization - we can reasonably assume that those finalizers
- don't do anything fancy and *just* call them. Among other things
+ def deal_with_young_objects_with_destructors(self):
+ """We can reasonably assume that destructors don't do
+ anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
- while self.young_objects_with_light_finalizers.non_empty():
- obj = self.young_objects_with_light_finalizers.pop()
+ while self.young_objects_with_destructors.non_empty():
+ obj = self.young_objects_with_destructors.pop()
if not self.is_forwarded(obj):
- finalizer = self.getlightfinalizer(self.get_type_id(obj))
- ll_assert(bool(finalizer), "no light finalizer found")
- finalizer(obj)
+ self.call_destructor(obj)
else:
obj = self.get_forwarding_address(obj)
- self.old_objects_with_light_finalizers.append(obj)
+ self.old_objects_with_destructors.append(obj)
- def deal_with_old_objects_with_finalizers(self):
- """ This is a much simpler version of dealing with finalizers
- and an optimization - we can reasonably assume that those finalizers
- don't do anything fancy and *just* call them. Among other things
+ def deal_with_old_objects_with_destructors(self):
+ """We can reasonably assume that destructors don't do
+ anything fancy and *just* call them. Among other things
they won't resurrect objects
"""
new_objects = self.AddressStack()
- while self.old_objects_with_light_finalizers.non_empty():
- obj = self.old_objects_with_light_finalizers.pop()
+ while self.old_objects_with_destructors.non_empty():
+ obj = self.old_objects_with_destructors.pop()
if self.header(obj).tid & GCFLAG_VISITED:
# surviving
new_objects.append(obj)
else:
# dying
- finalizer = self.getlightfinalizer(self.get_type_id(obj))
- ll_assert(bool(finalizer), "no light finalizer found")
- finalizer(obj)
- self.old_objects_with_light_finalizers.delete()
- self.old_objects_with_light_finalizers = new_objects
+ self.call_destructor(obj)
+ self.old_objects_with_destructors.delete()
+ self.old_objects_with_destructors = new_objects
+
+ def deal_with_young_objects_with_finalizers(self):
+ while self.probably_young_objects_with_finalizers.non_empty():
+ obj = self.probably_young_objects_with_finalizers.popleft()
+ fq_nr = self.probably_young_objects_with_finalizers.popleft()
+ self.singleaddr.address[0] = obj
+ self._trace_drag_out1(self.singleaddr)
+ obj = self.singleaddr.address[0]
+ self.old_objects_with_finalizers.append(obj)
+ self.old_objects_with_finalizers.append(fq_nr)
def deal_with_objects_with_finalizers(self):
# Walk over list of objects with finalizers.
@@ -1925,14 +1956,17 @@
marked = self.AddressDeque()
pending = self.AddressStack()
self.tmpstack = self.AddressStack()
- while self.objects_with_finalizers.non_empty():
- x = self.objects_with_finalizers.popleft()
+ while self.old_objects_with_finalizers.non_empty():
+ x = self.old_objects_with_finalizers.popleft()
+ fq_nr = self.old_objects_with_finalizers.popleft()
ll_assert(self._finalization_state(x) != 1,
"bad finalization state 1")
if self.header(x).tid & GCFLAG_VISITED:
new_with_finalizer.append(x)
+ new_with_finalizer.append(fq_nr)
continue
marked.append(x)
+ marked.append(fq_nr)
pending.append(x)
while pending.non_empty():
y = pending.pop()
@@ -1946,22 +1980,26 @@
while marked.non_empty():
x = marked.popleft()
+ fq_nr = marked.popleft()
state = self._finalization_state(x)
ll_assert(state >= 2, "unexpected finalization state < 2")
if state == 2:
- self.run_finalizers.append(x)
+ from rpython.rtyper.lltypesystem import rffi
+ fq_index = rffi.cast(lltype.Signed, fq_nr)
+ self.mark_finalizer_to_run(fq_index, x)
# we must also fix the state from 2 to 3 here, otherwise
# we leave the GCFLAG_FINALIZATION_ORDERING bit behind
# which will confuse the next collection
self._recursively_bump_finalization_state_from_2_to_3(x)
else:
new_with_finalizer.append(x)
+ new_with_finalizer.append(fq_nr)
self.tmpstack.delete()
pending.delete()
marked.delete()
- self.objects_with_finalizers.delete()
- self.objects_with_finalizers = new_with_finalizer
+ self.old_objects_with_finalizers.delete()
+ self.old_objects_with_finalizers = new_with_finalizer
def _append_if_nonnull(pointer, stack):
stack.append(pointer.address[0])
diff --git a/rpython/memory/gc/semispace.py b/rpython/memory/gc/semispace.py
--- a/rpython/memory/gc/semispace.py
+++ b/rpython/memory/gc/semispace.py
@@ -111,7 +111,9 @@
# self.objects_with_light_finalizers.append(result + size_gc_header)
#else:
if has_finalizer:
+ from rpython.rtyper.lltypesystem import rffi
self.objects_with_finalizers.append(result + size_gc_header)
+ self.objects_with_finalizers.append(rffi.cast(llmemory.Address, -1))
if contains_weakptr:
self.objects_with_weakrefs.append(result + size_gc_header)
return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
@@ -149,6 +151,13 @@
else:
return False
+ def register_finalizer(self, fq_index, gcobj):
+ from rpython.rtyper.lltypesystem import rffi
+ obj = llmemory.cast_ptr_to_adr(gcobj)
+ fq_index = rffi.cast(llmemory.Address, fq_index)
+ self.objects_with_finalizers.append(obj)
+ self.objects_with_finalizers.append(fq_index)
+
def obtain_free_space(self, needed):
# a bit of tweaking to maximize the performance and minimize the
# amount of code in an inlined version of malloc_fixedsize_clear()
@@ -268,8 +277,7 @@
scan = self.free = tospace
self.starting_full_collect()
self.collect_roots()
- if self.run_finalizers.non_empty():
- self.update_run_finalizers()
+ self.copy_pending_finalizers(self.copy)
scan = self.scan_copied(scan)
if self.objects_with_light_finalizers.non_empty():
self.deal_with_objects_with_light_finalizers()
@@ -499,8 +507,7 @@
if self.surviving(obj):
new_objects.append(self.get_forwarding_address(obj))
else:
- finalizer = self.getfinalizer(self.get_type_id(obj))
- finalizer(obj)
+ self.call_destructor(obj)
self.objects_with_light_finalizers.delete()
self.objects_with_light_finalizers = new_objects
@@ -517,12 +524,15 @@
self.tmpstack = self.AddressStack()
while self.objects_with_finalizers.non_empty():
x = self.objects_with_finalizers.popleft()
+ fq_nr = self.objects_with_finalizers.popleft()
ll_assert(self._finalization_state(x) != 1,
"bad finalization state 1")
if self.surviving(x):
new_with_finalizer.append(self.get_forwarding_address(x))
+ new_with_finalizer.append(fq_nr)
continue
marked.append(x)
+ marked.append(fq_nr)
pending.append(x)
while pending.non_empty():
y = pending.pop()
@@ -537,17 +547,21 @@
while marked.non_empty():
x = marked.popleft()
+ fq_nr = marked.popleft()
state = self._finalization_state(x)
ll_assert(state >= 2, "unexpected finalization state < 2")
newx = self.get_forwarding_address(x)
if state == 2:
- self.run_finalizers.append(newx)
+ from rpython.rtyper.lltypesystem import rffi
+ fq_index = rffi.cast(lltype.Signed, fq_nr)
+ self.mark_finalizer_to_run(fq_index, newx)
# we must also fix the state from 2 to 3 here, otherwise
# we leave the GCFLAG_FINALIZATION_ORDERING bit behind
# which will confuse the next collection
self._recursively_bump_finalization_state_from_2_to_3(x)
else:
new_with_finalizer.append(newx)
+ new_with_finalizer.append(fq_nr)
self.tmpstack.delete()
pending.delete()
@@ -627,16 +641,6 @@
self.objects_with_weakrefs.delete()
self.objects_with_weakrefs = new_with_weakref
- def update_run_finalizers(self):
- # we are in an inner collection, caused by a finalizer
- # the run_finalizers objects need to be copied
- new_run_finalizer = self.AddressDeque()
- while self.run_finalizers.non_empty():
- obj = self.run_finalizers.popleft()
- new_run_finalizer.append(self.copy(obj))
- self.run_finalizers.delete()
- self.run_finalizers = new_run_finalizer
-
def _is_external(self, obj):
return (self.header(obj).tid & GCFLAG_EXTERNAL) != 0
diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py
--- a/rpython/memory/gc/test/test_direct.py
+++ b/rpython/memory/gc/test/test_direct.py
@@ -8,7 +8,7 @@
import py
from rpython.rtyper.lltypesystem import lltype, llmemory
-from rpython.memory.gctypelayout import TypeLayoutBuilder
+from rpython.memory.gctypelayout import TypeLayoutBuilder, FIN_HANDLER_ARRAY
from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int
from rpython.memory.gc import minimark, incminimark
from rpython.memory.gctypelayout import zero_gc_pointers_inside, zero_gc_pointers
@@ -84,7 +84,9 @@
self.gc.set_root_walker(self.rootwalker)
self.layoutbuilder = TypeLayoutBuilder(self.GCClass)
self.get_type_id = self.layoutbuilder.get_type_id
- self.layoutbuilder.initialize_gc_query_function(self.gc)
+ gcdata = self.layoutbuilder.initialize_gc_query_function(self.gc)
+ ll_handlers = lltype.malloc(FIN_HANDLER_ARRAY, 0, immortal=True)
+ gcdata.finalizer_handlers = llmemory.cast_ptr_to_adr(ll_handlers)
self.gc.setup()
def consider_constant(self, p):
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -9,8 +9,10 @@
from rpython.memory import gctypelayout
from rpython.memory.gctransform.log import log
from rpython.memory.gctransform.support import get_rtti, ll_call_destructor
+from rpython.memory.gctransform.support import ll_report_finalizer_error
from rpython.memory.gctransform.transform import GCTransformer
from rpython.memory.gctypelayout import ll_weakref_deref, WEAKREF, WEAKREFPTR
+from rpython.memory.gctypelayout import FIN_TRIGGER_FUNC, FIN_HANDLER_ARRAY
from rpython.tool.sourcetools import func_with_new_name
from rpython.translator.backendopt import graphanalyze
from rpython.translator.backendopt.finalizer import FinalizerAnalyzer
@@ -181,8 +183,11 @@
gcdata.max_type_id = 13 # patched in finish()
gcdata.typeids_z = a_random_address # patched in finish()
gcdata.typeids_list = a_random_address # patched in finish()
+ gcdata.finalizer_handlers = a_random_address # patched in finish()
self.gcdata = gcdata
self.malloc_fnptr_cache = {}
+ self.finalizer_queue_indexes = {}
+ self.finalizer_handlers = []
gcdata.gc = GCClass(translator.config.translation, **GC_PARAMS)
root_walker = self.build_root_walker()
@@ -217,6 +222,7 @@
data_classdef.generalize_attr('max_type_id', annmodel.SomeInteger())
data_classdef.generalize_attr('typeids_z', SomeAddress())
data_classdef.generalize_attr('typeids_list', SomeAddress())
+ data_classdef.generalize_attr('finalizer_handlers', SomeAddress())
annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper)
@@ -555,6 +561,12 @@
[s_gc, s_typeid16],
s_gcref)
+ self.register_finalizer_ptr = getfn(GCClass.register_finalizer,
+ [s_gc,
+ annmodel.SomeInteger(),
+ s_gcref],
+ annmodel.s_None)
+
def create_custom_trace_funcs(self, gc, rtyper):
custom_trace_funcs = tuple(rtyper.custom_trace_funcs)
rtyper.custom_trace_funcs = custom_trace_funcs
@@ -681,6 +693,16 @@
ll_instance.inst_typeids_list= llmemory.cast_ptr_to_adr(ll_typeids_list)
newgcdependencies.append(ll_typeids_list)
#
+ handlers = self.finalizer_handlers
+ ll_handlers = lltype.malloc(FIN_HANDLER_ARRAY, len(handlers),
+ immortal=True)
+ for i in range(len(handlers)):
+ ll_handlers[i].deque = handlers[i][0]
+ ll_handlers[i].trigger = handlers[i][1]
+ ll_instance.inst_finalizer_handlers = llmemory.cast_ptr_to_adr(
+ ll_handlers)
+ newgcdependencies.append(ll_handlers)
+ #
return newgcdependencies
def get_finish_tables(self):
@@ -772,10 +794,8 @@
info = self.layoutbuilder.get_info(type_id)
c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
fptrs = self.special_funcptr_for_type(TYPE)
- has_finalizer = "finalizer" in fptrs
- has_light_finalizer = "light_finalizer" in fptrs
- if has_light_finalizer:
- has_finalizer = True
+ has_finalizer = "destructor" in fptrs or "old_style_finalizer" in fptrs
+ has_light_finalizer = "destructor" in fptrs
c_has_finalizer = rmodel.inputconst(lltype.Bool, has_finalizer)
c_has_light_finalizer = rmodel.inputconst(lltype.Bool,
has_light_finalizer)
@@ -1498,6 +1518,60 @@
return None
return getattr(obj, '_hash_cache_', None)
+ def get_finalizer_queue_index(self, hop):
+ fq_tag = hop.spaceop.args[0].value
+ assert 'FinalizerQueue TAG' in fq_tag.expr
+ fq = fq_tag.default
+ try:
+ index = self.finalizer_queue_indexes[fq]
+ except KeyError:
+ index = len(self.finalizer_queue_indexes)
+ assert index == len(self.finalizer_handlers)
+ deque = self.gcdata.gc.AddressDeque()
+ #
+ def ll_finalizer_trigger():
+ try:
+ fq.finalizer_trigger()
+ except Exception as e:
+ ll_report_finalizer_error(e)
+ ll_trigger = self.annotate_finalizer(ll_finalizer_trigger, [],
+ lltype.Void)
+ def ll_next_dead():
+ if deque.non_empty():
+ return deque.popleft()
+ else:
+ return llmemory.NULL
+ ll_next_dead = self.annotate_finalizer(ll_next_dead, [],
+ llmemory.Address)
+ c_ll_next_dead = rmodel.inputconst(lltype.typeOf(ll_next_dead),
+ ll_next_dead)
+ #
+ s_deque = self.translator.annotator.bookkeeper.immutablevalue(deque)
+ r_deque = self.translator.rtyper.getrepr(s_deque)
+ ll_deque = r_deque.convert_const(deque)
+ adr_deque = llmemory.cast_ptr_to_adr(ll_deque)
+ #
+ self.finalizer_handlers.append((adr_deque, ll_trigger,
+ c_ll_next_dead))
+ self.finalizer_queue_indexes[fq] = index
+ return index
+
+ def gct_gc_fq_register(self, hop):
+ index = self.get_finalizer_queue_index(hop)
+ c_index = rmodel.inputconst(lltype.Signed, index)
+ v_ptr = hop.spaceop.args[1]
+ v_ptr = hop.genop("cast_opaque_ptr", [v_ptr],
+ resulttype=llmemory.GCREF)
+ hop.genop("direct_call", [self.register_finalizer_ptr, self.c_const_gc,
+ c_index, v_ptr])
+
+ def gct_gc_fq_next_dead(self, hop):
+ index = self.get_finalizer_queue_index(hop)
+ c_ll_next_dead = self.finalizer_handlers[index][2]
+ v_adr = hop.genop("direct_call", [c_ll_next_dead],
+ resulttype=llmemory.Address)
+ hop.genop("cast_adr_to_ptr", [v_adr],
+ resultvar = hop.spaceop.result)
class TransformerLayoutBuilder(gctypelayout.TypeLayoutBuilder):
@@ -1513,22 +1587,18 @@
self.translator = translator
super(TransformerLayoutBuilder, self).__init__(GCClass, lltype2vtable)
- def has_finalizer(self, TYPE):
+ def has_destructor(self, TYPE):
rtti = get_rtti(TYPE)
return rtti is not None and getattr(rtti._obj, 'destructor_funcptr',
None)
- def has_light_finalizer(self, TYPE):
- fptrs = self.special_funcptr_for_type(TYPE)
- return "light_finalizer" in fptrs
-
def has_custom_trace(self, TYPE):
rtti = get_rtti(TYPE)
return rtti is not None and getattr(rtti._obj, 'custom_trace_funcptr',
None)
- def make_finalizer_funcptr_for_type(self, TYPE):
- if not self.has_finalizer(TYPE):
+ def make_destructor_funcptr_for_type(self, TYPE):
+ if not self.has_destructor(TYPE):
return None, False
rtti = get_rtti(TYPE)
destrptr = rtti._obj.destructor_funcptr
diff --git a/rpython/memory/gctransform/support.py b/rpython/memory/gctransform/support.py
--- a/rpython/memory/gctransform/support.py
+++ b/rpython/memory/gctransform/support.py
@@ -89,3 +89,11 @@
write(2, " ignoring it\n")
except:
pass
+
+def ll_report_finalizer_error(e):
+ try:
+ write(2, "triggering finalizers raised an exception ")
+ write(2, str(e))
+ write(2, " ignoring it\n")
+ except:
+ pass
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -17,16 +17,17 @@
OFFSETS_TO_GC_PTR = lltype.Array(lltype.Signed)
- # A custom tracer (CT), enumerates the addresses that contain GCREFs.
- # It is called with the object as first argument, and the previous
- # returned address (or NULL the first time) as the second argument.
- FINALIZER_FUNC = lltype.FuncType([llmemory.Address], lltype.Void)
- FINALIZER = lltype.Ptr(FINALIZER_FUNC)
+ # A CUSTOM_FUNC is either a destructor, or a custom tracer.
+ # A destructor is called when the object is about to be freed.
+ # A custom tracer (CT) enumerates the addresses that contain GCREFs.
+ # Both are called with the address of the object as only argument.
+ CUSTOM_FUNC = lltype.FuncType([llmemory.Address], lltype.Void)
+ CUSTOM_FUNC_PTR = lltype.Ptr(CUSTOM_FUNC)
# structure describing the layout of a typeid
TYPE_INFO = lltype.Struct("type_info",
("infobits", lltype.Signed), # combination of the T_xxx consts
- ("finalizer", FINALIZER),
+ ("customfunc", CUSTOM_FUNC_PTR),
("fixedsize", lltype.Signed),
("ofstoptrs", lltype.Ptr(OFFSETS_TO_GC_PTR)),
hints={'immutable': True},
@@ -80,16 +81,18 @@
def q_cannot_pin(self, typeid):
typeinfo = self.get(typeid)
ANY = (T_HAS_GCPTR | T_IS_WEAKREF)
- return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.finalizer)
+ return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.customfunc)
- def q_finalizer(self, typeid):
- return self.get(typeid).finalizer
+ def q_finalizer_handlers(self):
+ adr = self.finalizer_handlers # set from framework.py or gcwrapper.py
+ return llmemory.cast_adr_to_ptr(adr, lltype.Ptr(FIN_HANDLER_ARRAY))
- def q_light_finalizer(self, typeid):
+ def q_destructor_or_custom_trace(self, typeid):
+ return self.get(typeid).customfunc
+
+ def q_is_old_style_finalizer(self, typeid):
typeinfo = self.get(typeid)
- if typeinfo.infobits & T_HAS_LIGHTWEIGHT_FINALIZER:
- return typeinfo.finalizer
- return lltype.nullptr(GCData.FINALIZER_FUNC)
+ return (typeinfo.infobits & T_HAS_OLDSTYLE_FINALIZER) != 0
def q_offsets_to_gc_pointers(self, typeid):
return self.get(typeid).ofstoptrs
@@ -141,8 +144,9 @@
self.q_is_varsize,
self.q_has_gcptr_in_varsize,
self.q_is_gcarrayofgcptr,
- self.q_finalizer,
- self.q_light_finalizer,
+ self.q_finalizer_handlers,
+ self.q_destructor_or_custom_trace,
+ self.q_is_old_style_finalizer,
self.q_offsets_to_gc_pointers,
self.q_fixed_size,
self.q_varsize_item_sizes,
@@ -170,7 +174,7 @@
T_IS_WEAKREF = 0x080000
T_IS_RPYTHON_INSTANCE = 0x100000 # the type is a subclass of OBJECT
T_HAS_CUSTOM_TRACE = 0x200000
-T_HAS_LIGHTWEIGHT_FINALIZER = 0x400000
+T_HAS_OLDSTYLE_FINALIZER = 0x400000
T_HAS_GCPTR = 0x1000000
T_KEY_MASK = intmask(0xFE000000) # bug detection only
T_KEY_VALUE = intmask(0x5A000000) # bug detection only
@@ -199,11 +203,11 @@
#
fptrs = builder.special_funcptr_for_type(TYPE)
if fptrs:
- if "finalizer" in fptrs:
- info.finalizer = fptrs["finalizer"]
- if "light_finalizer" in fptrs:
- info.finalizer = fptrs["light_finalizer"]
- infobits |= T_HAS_LIGHTWEIGHT_FINALIZER
+ if "destructor" in fptrs:
+ info.customfunc = fptrs["destructor"]
+ if "old_style_finalizer" in fptrs:
+ info.customfunc = fptrs["old_style_finalizer"]
+ infobits |= T_HAS_OLDSTYLE_FINALIZER
#
if not TYPE._is_varsize():
info.fixedsize = llarena.round_up_for_allocation(
@@ -373,21 +377,21 @@
def special_funcptr_for_type(self, TYPE):
if TYPE in self._special_funcptrs:
return self._special_funcptrs[TYPE]
- fptr1, is_lightweight = self.make_finalizer_funcptr_for_type(TYPE)
+ fptr1, is_lightweight = self.make_destructor_funcptr_for_type(TYPE)
fptr2 = self.make_custom_trace_funcptr_for_type(TYPE)
result = {}
if fptr1:
if is_lightweight:
- result["light_finalizer"] = fptr1
+ result["destructor"] = fptr1
else:
- result["finalizer"] = fptr1
+ result["old_style_finalizer"] = fptr1
if fptr2:
result["custom_trace"] = fptr2
self._special_funcptrs[TYPE] = result
return result
- def make_finalizer_funcptr_for_type(self, TYPE):
- # must be overridden for proper finalizer support
+ def make_destructor_funcptr_for_type(self, TYPE):
+ # must be overridden for proper destructor support
return None, False
def make_custom_trace_funcptr_for_type(self, TYPE):
@@ -546,3 +550,9 @@
link = lltype.malloc(WEAKREF, immortal=True)
link.weakptr = llmemory.cast_ptr_to_adr(targetptr)
return link
+
+########## finalizers ##########
+
+FIN_TRIGGER_FUNC = lltype.FuncType([], lltype.Void)
+FIN_HANDLER_ARRAY = lltype.Array(('deque', llmemory.Address),
+ ('trigger', lltype.Ptr(FIN_TRIGGER_FUNC)))
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -1,7 +1,7 @@
from rpython.translator.backendopt.finalizer import FinalizerAnalyzer
from rpython.rtyper.lltypesystem import lltype, llmemory, llheap
-from rpython.rtyper import llinterp
-from rpython.rtyper.annlowlevel import llhelper
+from rpython.rtyper import llinterp, rclass
+from rpython.rtyper.annlowlevel import llhelper, cast_nongc_instance_to_adr
from rpython.memory import gctypelayout
from rpython.flowspace.model import Constant
@@ -15,6 +15,7 @@
chunk_size = 10,
translated_to_c = False,
**GC_PARAMS)
+ self.translator = translator
self.gc.set_root_walker(LLInterpRootWalker(self))
self.gc.DEBUG = True
self.llinterp = llinterp
@@ -30,6 +31,11 @@
self.llinterp)
self.get_type_id = layoutbuilder.get_type_id
gcdata = layoutbuilder.initialize_gc_query_function(self.gc)
+ self.gcdata = gcdata
+
+ self.finalizer_queue_indexes = {}
+ self.finalizer_handlers = []
+ self.update_finalizer_handlers()
constants = collect_constants(flowgraphs)
for obj in constants:
@@ -187,6 +193,55 @@
def thread_run(self):
pass
+ def _get_finalizer_trigger(self, fq):
+ graph = self.translator._graphof(fq.finalizer_trigger.im_func)
+ def ll_trigger():
+ try:
+ self.llinterp.eval_graph(graph, [None], recursive=True)
+ except llinterp.LLException:
+ raise RuntimeError(
+ "finalizer_trigger() raised an exception, shouldn't happen")
+ return ll_trigger
+
+ def update_finalizer_handlers(self):
+ handlers = self.finalizer_handlers
+ ll_handlers = lltype.malloc(gctypelayout.FIN_HANDLER_ARRAY,
+ len(handlers), immortal=True)
+ for i in range(len(handlers)):
+ fq, deque = handlers[i]
+ ll_handlers[i].deque = cast_nongc_instance_to_adr(deque)
+ ll_handlers[i].trigger = llhelper(
+ lltype.Ptr(gctypelayout.FIN_TRIGGER_FUNC),
+ self._get_finalizer_trigger(fq))
+ self.gcdata.finalizer_handlers = llmemory.cast_ptr_to_adr(ll_handlers)
+
+ def get_finalizer_queue_index(self, fq_tag):
+ assert 'FinalizerQueue TAG' in fq_tag.expr
+ fq = fq_tag.default
+ try:
+ index = self.finalizer_queue_indexes[fq]
+ except KeyError:
+ index = len(self.finalizer_handlers)
+ self.finalizer_queue_indexes[fq] = index
+ deque = self.gc.AddressDeque()
+ self.finalizer_handlers.append((fq, deque))
+ self.update_finalizer_handlers()
+ return index
+
+ def gc_fq_next_dead(self, fq_tag):
+ index = self.get_finalizer_queue_index(fq_tag)
+ deque = self.finalizer_handlers[index][1]
+ if deque.non_empty():
+ obj = deque.popleft()
+ else:
+ obj = llmemory.NULL
+ return llmemory.cast_adr_to_ptr(obj, rclass.OBJECTPTR)
+
+ def gc_fq_register(self, fq_tag, ptr):
+ index = self.get_finalizer_queue_index(fq_tag)
+ ptr = lltype.cast_opaque_ptr(llmemory.GCREF, ptr)
+ self.gc.register_finalizer(index, ptr)
+
# ____________________________________________________________
class LLInterpRootWalker:
@@ -228,7 +283,7 @@
self.llinterp = llinterp
super(DirectRunLayoutBuilder, self).__init__(GCClass, lltype2vtable)
- def make_finalizer_funcptr_for_type(self, TYPE):
+ def make_destructor_funcptr_for_type(self, TYPE):
from rpython.memory.gctransform.support import get_rtti
rtti = get_rtti(TYPE)
if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
@@ -239,15 +294,17 @@
return None, False
t = self.llinterp.typer.annotator.translator
- light = not FinalizerAnalyzer(t).analyze_light_finalizer(destrgraph)
- def ll_finalizer(addr):
+ is_light = not FinalizerAnalyzer(t).analyze_light_finalizer(destrgraph)
+
+ def ll_destructor(addr):
try:
v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
self.llinterp.eval_graph(destrgraph, [v], recursive=True)
except llinterp.LLException:
raise RuntimeError(
- "a finalizer raised an exception, shouldn't happen")
- return llhelper(gctypelayout.GCData.FINALIZER, ll_finalizer), light
+ "a destructor raised an exception, shouldn't happen")
+ return (llhelper(gctypelayout.GCData.CUSTOM_FUNC_PTR, ll_destructor),
+ is_light)
def make_custom_trace_funcptr_for_type(self, TYPE):
from rpython.memory.gctransform.support import get_rtti
diff --git a/rpython/memory/support.py b/rpython/memory/support.py
--- a/rpython/memory/support.py
+++ b/rpython/memory/support.py
@@ -2,6 +2,9 @@
from rpython.rlib.objectmodel import free_non_gc_object, we_are_translated
from rpython.rlib.debug import ll_assert
from rpython.tool.identity_dict import identity_dict
+from rpython.rtyper.rclass import NONGCOBJECTPTR
+from rpython.rtyper.annlowlevel import cast_nongc_instance_to_base_ptr
+from rpython.rtyper.annlowlevel import cast_base_ptr_to_nongc_instance
def mangle_hash(i):
@@ -292,6 +295,9 @@
cur = next
free_non_gc_object(self)
+ def _was_freed(self):
+ return False # otherwise, the __class__ changes
+
cache[chunk_size] = AddressDeque
return AddressDeque
diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py
--- a/rpython/memory/test/gc_test_base.py
+++ b/rpython/memory/test/gc_test_base.py
@@ -128,7 +128,7 @@
assert res == concat(100)
#assert simulator.current_size - curr < 16000 * INT_SIZE / 4
- def test_finalizer(self):
+ def test_destructor(self):
class B(object):
pass
b = B()
@@ -152,6 +152,98 @@
res = self.interpret(f, [5])
assert res == 6
+ def test_old_style_finalizer(self):
+ class B(object):
+ pass
+ b = B()
+ b.nextid = 0
+ b.num_deleted = 0
+ class A(object):
+ def __init__(self):
+ self.id = b.nextid
+ b.nextid += 1
+ def __del__(self):
+ llop.gc__collect(lltype.Void)
+ b.num_deleted += 1
+ def f(x):
+ a = A()
+ i = 0
+ while i < x:
+ i += 1
+ a = A()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ return b.num_deleted
+ res = self.interpret(f, [5])
+ assert res == 6
+
+ def test_finalizer(self):
+ class B(object):
+ pass
+ b = B()
+ b.nextid = 0
+ b.num_deleted = 0
+ class A(object):
+ def __init__(self):
+ self.id = b.nextid
+ b.nextid += 1
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ b.num_deleted += 1
+ fq = FQ()
+ def f(x):
+ a = A()
+ i = 0
+ while i < x:
+ i += 1
+ a = A()
+ a = None
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ return b.num_deleted
+ res = self.interpret(f, [5])
+ assert res == 6
+
+ def test_finalizer_delaying_next_dead(self):
+ class B(object):
+ pass
+ b = B()
+ b.nextid = 0
+ class A(object):
+ def __init__(self):
+ self.id = b.nextid
+ b.nextid += 1
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ b.triggered += 1
+ fq = FQ()
+ def g(): # indirection to avoid leaking the result for too long
+ A()
+ def f(x):
+ b.triggered = 0
+ g()
+ i = 0
+ while i < x:
+ i += 1
+ g()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ assert b.triggered > 0
+ g(); g() # two more
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ num_deleted = 0
+ while fq.next_dead() is not None:
+ num_deleted += 1
+ return num_deleted + 1000 * b.triggered
+ res = self.interpret(f, [5])
+ assert res in (3008, 4008, 5008), "res == %d" % (res,)
+
def test_finalizer_calls_malloc(self):
class B(object):
pass
@@ -162,18 +254,27 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- C()
+ fq.register_finalizer(self)
class C(A):
- def __del__(self):
- b.num_deleted += 1
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ b.num_deleted += 1
+ if not isinstance(a, C):
+ C()
+ fq = FQ()
def f(x):
a = A()
i = 0
while i < x:
i += 1
a = A()
+ a = None
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted
@@ -190,15 +291,21 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- llop.gc__collect(lltype.Void)
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ b.num_deleted += 1
+ llop.gc__collect(lltype.Void)
+ fq = FQ()
def f(x):
a = A()
i = 0
while i < x:
i += 1
a = A()
+ a = None
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted
@@ -215,20 +322,29 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- b.a = self
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ b.num_deleted += 1
+ b.a = a
+ fq = FQ()
def f(x):
a = A()
i = 0
while i < x:
i += 1
a = A()
+ a = None
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
aid = b.a.id
b.a = None
- # check that __del__ is not called again
+ # check that finalizer_trigger() is not called again
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted * 10 + aid + 100 * (b.a is None)
@@ -290,7 +406,7 @@
res = self.interpret(f, [])
assert res
- def test_weakref_to_object_with_finalizer(self):
+ def test_weakref_to_object_with_destructor(self):
import weakref
class A(object):
count = 0
@@ -310,6 +426,32 @@
res = self.interpret(f, [])
assert res
+ def test_weakref_to_object_with_finalizer(self):
+ import weakref
+ class A(object):
+ count = 0
+ a = A()
+ class B(object):
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = B
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ a.count += 1
+ fq = FQ()
+ def g():
+ b = B()
+ fq.register_finalizer(b)
+ return weakref.ref(b)
+ def f():
+ ref = g()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ result = a.count == 1 and (ref() is None)
+ return result
+ res = self.interpret(f, [])
+ assert res
+
def test_bug_1(self):
import weakref
class B(object):
@@ -329,23 +471,32 @@
res = self.interpret(f, [])
assert res
- def test_cycle_with_weakref_and_del(self):
+ def test_cycle_with_weakref_and_finalizer(self):
import weakref
class A(object):
count = 0
a = A()
class B(object):
- def __del__(self):
- # when __del__ is called, the weakref to c should be dead
- if self.ref() is None:
- a.count += 10 # ok
- else:
- a.count = 666 # not ok
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = B
+ def finalizer_trigger(self):
+ while True:
+ b = self.next_dead()
+ if b is None:
+ break
+ # when we are here, the weakref to c should be dead
+ if b.ref() is None:
+ a.count += 10 # ok
+ else:
+ a.count = 666 # not ok
+ fq = FQ()
class C(object):
pass
def g():
c = C()
c.b = B()
+ fq.register_finalizer(c.b)
ref = weakref.ref(c)
c.b.ref = ref
return ref
@@ -365,23 +516,32 @@
a = A()
expected_invalid = self.WREF_IS_INVALID_BEFORE_DEL_IS_CALLED
class B(object):
- def __del__(self):
- # when __del__ is called, the weakref to myself is still valid
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = B
+ def finalizer_trigger(self):
+ # when we are here, the weakref to myself is still valid
# in RPython with most GCs. However, this can lead to strange
# bugs with incminimark. https://bugs.pypy.org/issue1687
# So with incminimark, we expect the opposite.
- if expected_invalid:
- if self.ref() is None:
- a.count += 10 # ok
+ while True:
+ b = self.next_dead()
+ if b is None:
+ break
+ if expected_invalid:
+ if b.ref() is None:
+ a.count += 10 # ok
+ else:
+ a.count = 666 # not ok
else:
- a.count = 666 # not ok
- else:
- if self.ref() is self:
- a.count += 10 # ok
- else:
- a.count = 666 # not ok
+ if b.ref() is b:
+ a.count += 10 # ok
+ else:
+ a.count = 666 # not ok
+ fq = FQ()
def g():
b = B()
+ fq.register_finalizer(b)
ref = weakref.ref(b)
b.ref = ref
return ref
@@ -399,10 +559,19 @@
class A(object):
pass
class B(object):
- def __del__(self):
- self.wref().x += 1
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = B
+ def finalizer_trigger(self):
+ while True:
+ b = self.next_dead()
+ if b is None:
+ break
+ b.wref().x += 1
+ fq = FQ()
def g(a):
b = B()
+ fq.register_finalizer(b)
b.wref = weakref.ref(a)
# the only way to reach this weakref is via B, which is an
# object with finalizer (but the weakref itself points to
@@ -448,9 +617,14 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- b.num_deleted += 1
- b.all.append(D(b.num_deleted))
+ fq.register_finalizer(self)
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ b.num_deleted += 1
+ b.all.append(D(b.num_deleted))
+ fq = FQ()
class D(object):
# make a big object that does not use malloc_varsize
def __init__(self, x):
@@ -461,6 +635,7 @@
i = 0
all = [None] * x
a = A()
+ del a
while i < x:
d = D(i)
all[i] = d
@@ -481,15 +656,24 @@
def __init__(self):
self.id = b.nextid
b.nextid += 1
- def __del__(self):
- llop.gc__collect(lltype.Void)
- b.num_deleted += 1
- C()
- C()
+ fq.register_finalizer(self)
class C(A):
- def __del__(self):
- b.num_deleted += 1
- b.num_deleted_c += 1
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ llop.gc__collect(lltype.Void)
+ b.num_deleted += 1
+ if isinstance(a, C):
+ b.num_deleted_c += 1
+ else:
+ C()
+ C()
+ fq = FQ()
def f(x, y):
persistent_a1 = A()
persistent_a2 = A()
diff --git a/rpython/memory/test/snippet.py b/rpython/memory/test/snippet.py
--- a/rpython/memory/test/snippet.py
+++ b/rpython/memory/test/snippet.py
@@ -1,5 +1,6 @@
import os, py
from rpython.tool.udir import udir
+from rpython.rlib import rgc
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
@@ -52,7 +53,7 @@
def set_age_of(c, newvalue):
# NB. this used to be a dictionary, but setting into a dict
# consumes memory. This has the effect that this test's
- # __del__ methods can consume more memory and potentially
+ # finalizer_trigger method can consume more memory and potentially
# cause another collection. This would result in objects
# being unexpectedly destroyed at the same 'state.time'.
state.age[ord(c) - ord('a')] = newvalue
@@ -61,12 +62,21 @@
def __init__(self, key):
self.key = key
self.refs = []
- def __del__(self):
+ fq.register_finalizer(self)
+
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
from rpython.rlib.debug import debug_print
- debug_print("DEL:", self.key)
- assert age_of(self.key) == -1
- set_age_of(self.key, state.time)
- state.progress = True
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ debug_print("DEL:", a.key)
+ assert age_of(a.key) == -1
+ set_age_of(a.key, state.time)
+ state.progress = True
+ fq = FQ()
def build_example(input):
state.time = 0
@@ -150,11 +160,22 @@
class B:
count = 0
class A:
- def __del__(self):
- self.b.count += 1
+ pass
+
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ a.b.count += 1
+ fq = FQ()
+
def g():
b = B()
a = A()
+ fq.register_finalizer(a)
a.b = b
i = 0
lst = [None]
diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py
--- a/rpython/memory/test/test_transformed_gc.py
+++ b/rpython/memory/test/test_transformed_gc.py
@@ -293,7 +293,7 @@
res = run([])
assert res == 42
- def define_finalizer(cls):
+ def define_destructor(cls):
class B(object):
pass
b = B()
@@ -316,6 +316,68 @@
return b.num_deleted
return f
From pypy.commits at gmail.com Thu May 5 02:59:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 04 May 2016 23:59:42 -0700 (PDT)
Subject: [pypy-commit] pypy default: Hide another debugging print
Message-ID: <572aef5e.43ecc20a.eb509.7dcf@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84211:a94009299a0e
Date: 2016-05-05 08:59 +0200
http://bitbucket.org/pypy/pypy/changeset/a94009299a0e/
Log: Hide another debugging print
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -744,7 +744,7 @@
try:
wrapper_gen = cache.wrapper_gens[signature]
except KeyError:
- print signature
+ #print signature
wrapper_gen = cache.wrapper_gens[signature] = WrapperGen(space,
signature)
cache.stats[0] += 1
From pypy.commits at gmail.com Thu May 5 03:05:04 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 00:05:04 -0700 (PDT)
Subject: [pypy-commit] pypy default: A passing test for objects that pass
through two different
Message-ID: <572af0a0.143f1c0a.10891.6c6b@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84212:eef230af28f1
Date: 2016-05-05 09:05 +0200
http://bitbucket.org/pypy/pypy/changeset/eef230af28f1/
Log: A passing test for objects that pass through two different
FinalizerQueues in sequence
diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py
--- a/rpython/memory/test/gc_test_base.py
+++ b/rpython/memory/test/gc_test_base.py
@@ -244,6 +244,48 @@
res = self.interpret(f, [5])
assert res in (3008, 4008, 5008), "res == %d" % (res,)
+ def test_finalizer_two_queues_in_sequence(self):
+ class B(object):
+ pass
+ b = B()
+ b.nextid = 0
+ b.num_deleted_1 = 0
+ b.num_deleted_2 = 0
+ class A(object):
+ def __init__(self):
+ self.id = b.nextid
+ b.nextid += 1
+ fq1.register_finalizer(self)
+ class FQ1(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while True:
+ a = self.next_dead()
+ if a is None:
+ break
+ b.num_deleted_1 += 1
+ fq2.register_finalizer(a)
+ class FQ2(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ while self.next_dead() is not None:
+ b.num_deleted_2 += 1
+ fq1 = FQ1()
+ fq2 = FQ2()
+ def f(x):
+ A()
+ i = 0
+ while i < x:
+ i += 1
+ A()
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ llop.gc__collect(lltype.Void)
+ return b.num_deleted_1 + b.num_deleted_2 * 1000
+ res = self.interpret(f, [5])
+ assert res == 6006
+
def test_finalizer_calls_malloc(self):
class B(object):
pass
From pypy.commits at gmail.com Thu May 5 04:01:36 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 01:01:36 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: A branch to use FinalizerQueue
inside pypy
Message-ID: <572afde0.a1ccc20a.f149c.ffff94c3@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84213:efd8be9a50aa
Date: 2016-05-05 10:00 +0200
http://bitbucket.org/pypy/pypy/changeset/efd8be9a50aa/
Log: A branch to use FinalizerQueue inside pypy
From pypy.commits at gmail.com Thu May 5 09:18:32 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 06:18:32 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Use @rgc.must_be_light_finalizer
on classes
Message-ID: <572b4828.d5da1c0a.f066.0c69@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84214:b00c736dfd7c
Date: 2016-05-05 10:19 +0200
http://bitbucket.org/pypy/pypy/changeset/b00c736dfd7c/
Log: Use @rgc.must_be_light_finalizer on classes
diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py
--- a/rpython/annotator/classdesc.py
+++ b/rpython/annotator/classdesc.py
@@ -579,6 +579,14 @@
if cls not in FORCE_ATTRIBUTES_INTO_CLASSES:
self.all_enforced_attrs = [] # no attribute allowed
+ if (getattr(cls, '_must_be_light_finalizer_', False) and
+ hasattr(cls, '__del__') and
+ not getattr(cls.__del__, '_must_be_light_finalizer_', False)):
+ raise AnnotatorError(
+ "Class %r is in a class hierarchy with "
+ "_must_be_light_finalizer_ = True, but it has a "
+ "destructor without @rgc.must_be_light_finalizer" % (cls,))
+
def add_source_attribute(self, name, value, mixin=False):
if isinstance(value, property):
# special case for property object
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4584,6 +4584,32 @@
e = py.test.raises(Exception, a.build_types, f, [])
assert str(e.value) == "Don't know how to represent Ellipsis"
+ def test_must_be_light_finalizer(self):
+ from rpython.rlib import rgc
+ @rgc.must_be_light_finalizer
+ class A(object):
+ pass
+ class B(A):
+ def __del__(self):
+ pass
+ class C(A):
+ @rgc.must_be_light_finalizer
+ def __del__(self):
+ pass
+ class D(object):
+ def __del__(self):
+ pass
+ def fb():
+ B()
+ def fc():
+ C()
+ def fd():
+ D()
+ a = self.RPythonAnnotator()
+ a.build_types(fc, [])
+ a.build_types(fd, [])
+ py.test.raises(AnnotatorError, a.build_types, fb, [])
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -362,6 +362,16 @@
return func
def must_be_light_finalizer(func):
+ """Mark a __del__ method as being a destructor, calling only a limited
+ set of operations. See pypy/doc/discussion/finalizer-order.rst.
+
+ If you use the same decorator on a class, this class and all its
+ subclasses are only allowed to have __del__ methods which are
+ similarly decorated (or no __del__ at all). It prevents a class
+ hierarchy from having destructors in some parent classes, which are
+ overridden in subclasses with (non-light, old-style) finalizers.
+ (This case is the original motivation for FinalizerQueue.)
+ """
func._must_be_light_finalizer_ = True
return func
From pypy.commits at gmail.com Thu May 5 09:18:34 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 06:18:34 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: in-progress
Message-ID: <572b482a.8d1f1c0a.7bc4a.0b76@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84215:3c2a2910cc82
Date: 2016-05-05 15:18 +0200
http://bitbucket.org/pypy/pypy/changeset/3c2a2910cc82/
Log: in-progress
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -11,7 +11,7 @@
INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
- UserDelAction)
+ make_finalizer_queue)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
@@ -28,6 +28,7 @@
"""This is the abstract root class of all wrapped objects that live
in a 'normal' object space like StdObjSpace."""
__slots__ = ('__weakref__',)
+ _must_be_light_finalizer_ = True
user_overridden_class = False
def getdict(self, space):
@@ -136,9 +137,8 @@
pass
def clear_all_weakrefs(self):
- """Call this at the beginning of interp-level __del__() methods
- in subclasses. It ensures that weakrefs (if any) are cleared
- before the object is further destroyed.
+ """Ensures that weakrefs (if any) are cleared now. This is
+ called by UserDelAction before the object is finalized further.
"""
lifeline = self.getweakref()
if lifeline is not None:
@@ -151,25 +151,10 @@
self.delweakref()
lifeline.clear_all_weakrefs()
- __already_enqueued_for_destruction = ()
+ def _finalize_(self):
+ """The RPython-level finalizer.
- def enqueue_for_destruction(self, space, callback, descrname):
- """Put the object in the destructor queue of the space.
- At a later, safe point in time, UserDelAction will call
- callback(self). If that raises OperationError, prints it
- to stderr with the descrname string.
-
- Note that 'callback' will usually need to start with:
- assert isinstance(self, W_SpecificClass)
- """
- # this function always resurect the object, so when
- # running on top of CPython we must manually ensure that
- # we enqueue it only once
- if not we_are_translated():
- if callback in self.__already_enqueued_for_destruction:
- return
- self.__already_enqueued_for_destruction += (callback,)
- space.user_del_action.register_callback(self, callback, descrname)
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"""
# hooks that the mapdict implementations needs:
def _get_mapdict_map(self):
@@ -389,9 +374,9 @@
self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
- self.user_del_action = UserDelAction(self)
+ make_finalizer_queue(W_Root, self)
self._code_of_sys_exc_info = None
-
+
# can be overridden to a subclass
self.initialize()
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -2,7 +2,7 @@
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import specialize
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
TICK_COUNTER_STEP = 100
@@ -515,75 +515,70 @@
"""
-class UserDelCallback(object):
- def __init__(self, w_obj, callback, descrname):
- self.w_obj = w_obj
- self.callback = callback
- self.descrname = descrname
- self.next = None
-
class UserDelAction(AsyncAction):
"""An action that invokes all pending app-level __del__() method.
This is done as an action instead of immediately when the
- interp-level __del__() is invoked, because the latter can occur more
+ WRootFinalizerQueue is triggered, because the latter can occur more
or less anywhere in the middle of code that might not be happy with
random app-level code mutating data structures under its feet.
"""
def __init__(self, space):
AsyncAction.__init__(self, space)
- self.dying_objects = None
- self.dying_objects_last = None
- self.finalizers_lock_count = 0
- self.enabled_at_app_level = True
-
- def register_callback(self, w_obj, callback, descrname):
- cb = UserDelCallback(w_obj, callback, descrname)
- if self.dying_objects_last is None:
- self.dying_objects = cb
- else:
- self.dying_objects_last.next = cb
- self.dying_objects_last = cb
- self.fire()
+ self.finalizers_lock_count = 0 # see pypy/module/gc
+ self.enabled_at_app_level = True # see pypy/module/gc
def perform(self, executioncontext, frame):
if self.finalizers_lock_count > 0:
return
self._run_finalizers()
+ def _report_error(self, e, where, w_obj):
+ space = self.space
+ if isinstance(e, OperationError):
+ e.write_unraisable(space, where, w_obj)
+ e.clear(space) # break up reference cycles
+ else:
+ addrstring = w_obj.getaddrstring(space)
+ msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % (
+ str(e), where, space.type(w_obj).name, addrstring))
+ space.call_method(space.sys.get('stderr'), 'write',
+ space.wrap(msg))
+
def _run_finalizers(self):
- # Each call to perform() first grabs the self.dying_objects
- # and replaces it with an empty list. We do this to try to
- # avoid too deep recursions of the kind of __del__ being called
- # while in the middle of another __del__ call.
- pending = self.dying_objects
- self.dying_objects = None
- self.dying_objects_last = None
- space = self.space
- while pending is not None:
+ while True:
+ w_obj = self.space.finalizer_queue.next_dead()
+ if w_obj is None:
+ break
+
+ # Before calling the finalizers, clear the weakrefs, if any.
+ w_obj.clear_all_weakrefs()
+
+ # Look up and call the app-level __del__, if any.
try:
- pending.callback(pending.w_obj)
- except OperationError as e:
- e.write_unraisable(space, pending.descrname, pending.w_obj)
- e.clear(space) # break up reference cycles
- pending = pending.next
- #
- # Note: 'dying_objects' used to be just a regular list instead
- # of a chained list. This was the cause of "leaks" if we have a
- # program that constantly creates new objects with finalizers.
- # Here is why: say 'dying_objects' is a long list, and there
- # are n instances in it. Then we spend some time in this
- # function, possibly triggering more GCs, but keeping the list
- # of length n alive. Then the list is suddenly freed at the
- # end, and we return to the user program. At this point the
- # GC limit is still very high, because just before, there was
- # a list of length n alive. Assume that the program continues
- # to allocate a lot of instances with finalizers. The high GC
- # limit means that it could allocate a lot of instances before
- # reaching it --- possibly more than n. So the whole procedure
- # repeats with higher and higher values of n.
- #
- # This does not occur in the current implementation because
- # there is no list of length n: if n is large, then the GC
- # will run several times while walking the list, but it will
- # see lower and lower memory usage, with no lower bound of n.
+ self.space.userdel(w_obj)
+ except Exception as e:
+ self._report_error(e, "method __del__ of ", w_obj)
+
+ # Call the RPython-level _finalize_() method.
+ try:
+ w_obj._finalize_()
+ except Exception as e:
+ self._report_error(e, "internal finalizer of ", w_obj)
+
+
+def make_finalizer_queue(W_Root, space):
+ """Make a FinalizerQueue subclass which responds to GC finalizer
+ events by 'firing' the UserDelAction class above. It does not
+    directly fetch the objects to finalize at all; they stay in the
+ GC-managed queue, and will only be fetched by UserDelAction
+ (between bytecodes)."""
+
+ class WRootFinalizerQueue(rgc.FinalizerQueue):
+ Class = W_Root
+
+ def finalizer_trigger(self):
+ space.user_del_action.fire()
+
+ space.user_del_action = UserDelAction(space)
+ space.finalizer_queue = WRootFinalizerQueue()
From pypy.commits at gmail.com Thu May 5 10:59:49 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 05 May 2016 07:59:49 -0700 (PDT)
Subject: [pypy-commit] pypy numpy_broadcast_nd: Close branch
numpy_broadcast_nd
Message-ID: <572b5fe5.69cdc20a.cf664.4b6c@mx.google.com>
Author: Matti Picus
Branch: numpy_broadcast_nd
Changeset: r84216:a97af41ff13e
Date: 2016-05-05 17:59 +0300
http://bitbucket.org/pypy/pypy/changeset/a97af41ff13e/
Log: Close branch numpy_broadcast_nd
From pypy.commits at gmail.com Thu May 5 11:03:31 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 08:03:31 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: in-progress
Message-ID: <572b60c3.89141c0a.eea66.3bbf@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84217:bc676182f7b4
Date: 2016-05-05 17:03 +0200
http://bitbucket.org/pypy/pypy/changeset/bc676182f7b4/
Log: in-progress
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -152,9 +152,34 @@
lifeline.clear_all_weakrefs()
def _finalize_(self):
- """The RPython-level finalizer.
+ """The RPython-level finalizer.
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"""
+ By default, it is *not called*. See self.register_finalizer().
+ Be ready to handle the case where the object is only half
+ initialized.
+ """
+
+ def register_finalizer(self, space):
+ """Register a finalizer for this object, so that
+ self._finalize_() will be called. You must call this method at
+ most once. Be ready to handle in _finalize_() the case where
+ the object is half-initialized, even if you only call
+ self.register_finalizer() at the end of the initialization.
+ This is because there are cases where the finalizer is already
+ registered before: if the user makes an app-level subclass with
+ a __del__. (In that case only, self.register_finalizer() does
+ nothing, because the finalizer is already registered in
+ allocate_instance().)
+ """
+ if self.user_overridden_class and self.getclass(space).hasuserdel:
+ # already registered by space.allocate_instance()
+ if not we_are_translated():
+ assert space.finalizer_queue._already_registered(self)
+ else:
+ if not we_are_translated():
+ # does not make sense if _finalize_ is not overridden
+ assert self._finalize_.im_func is not W_Root._finalize_.im_func
+ space.finalizer_queue.register_finalizer(self)
# hooks that the mapdict implementations needs:
def _get_mapdict_map(self):
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -103,26 +103,21 @@
# we need two subclasses of the app-level type, one to add mapdict, and then one
# to add del to not slow down the GC.
-def get_unique_interplevel_subclass(space, cls, needsdel=False):
+def get_unique_interplevel_subclass(space, cls):
"NOT_RPYTHON: initialization-time only"
- if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False):
- needsdel = False
assert cls.typedef.acceptable_as_base_class
- key = space, cls, needsdel
+ key = space, cls
try:
return _subclass_cache[key]
except KeyError:
- # XXX can save a class if cls already has a __del__
- if needsdel:
- cls = get_unique_interplevel_subclass(space, cls, False)
- subcls = _getusercls(space, cls, needsdel)
+ subcls = _getusercls(space, cls)
assert key not in _subclass_cache
_subclass_cache[key] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
_subclass_cache = {}
-def _getusercls(space, cls, wants_del, reallywantdict=False):
+def _getusercls(space, cls, reallywantdict=False):
from rpython.rlib import objectmodel
from pypy.objspace.std.objectobject import W_ObjectObject
from pypy.module.__builtin__.interp_classobj import W_InstanceObject
@@ -132,11 +127,10 @@
typedef = cls.typedef
name = cls.__name__ + "User"
- mixins_needed = []
if cls is W_ObjectObject or cls is W_InstanceObject:
- mixins_needed.append(_make_storage_mixin_size_n())
+ base_mixin = _make_storage_mixin_size_n()
else:
- mixins_needed.append(MapdictStorageMixin)
+ base_mixin = MapdictStorageMixin
copy_methods = [BaseUserClassMapdict]
if reallywantdict or not typedef.hasdict:
# the type has no dict, mapdict to provide the dict
@@ -147,44 +141,12 @@
# support
copy_methods.append(MapdictWeakrefSupport)
name += "Weakrefable"
- if wants_del:
- # This subclass comes with an app-level __del__. To handle
- # it, we make an RPython-level __del__ method. This
- # RPython-level method is called directly by the GC and it
- # cannot do random things (calling the app-level __del__ would
- # be "random things"). So instead, we just call here
- # enqueue_for_destruction(), and the app-level __del__ will be
- # called later at a safe point (typically between bytecodes).
- # If there is also an inherited RPython-level __del__, it is
- # called afterwards---not immediately! This base
- # RPython-level __del__ is supposed to run only when the
- # object is not reachable any more. NOTE: it doesn't fully
- # work: see issue #2287.
- name += "Del"
- parent_destructor = getattr(cls, '__del__', None)
- def call_parent_del(self):
- assert isinstance(self, subcls)
- parent_destructor(self)
- def call_applevel_del(self):
- assert isinstance(self, subcls)
- space.userdel(self)
- class Proto(object):
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(space, call_applevel_del,
- 'method __del__ of ')
- if parent_destructor is not None:
- self.enqueue_for_destruction(space, call_parent_del,
- 'internal destructor of ')
- mixins_needed.append(Proto)
class subcls(cls):
user_overridden_class = True
- for base in mixins_needed:
- objectmodel.import_from_mixin(base)
+ objectmodel.import_from_mixin(base_mixin)
for copycls in copy_methods:
_copy_methods(copycls, subcls)
- del subcls.base
subcls.__name__ = name
return subcls
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -357,11 +357,12 @@
if cls.typedef.applevel_subclasses_base is not None:
cls = cls.typedef.applevel_subclasses_base
#
- subcls = get_unique_interplevel_subclass(
- self, cls, w_subtype.needsdel)
+ subcls = get_unique_interplevel_subclass(self, cls)
instance = instantiate(subcls)
assert isinstance(instance, cls)
instance.user_setup(self, w_subtype)
+ if w_subtype.hasuserdel:
+ space.finalizer_queue.register_finalizer(instance)
else:
raise oefmt(self.w_TypeError,
"%N.__new__(%N): only for the type %N",
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -132,7 +132,7 @@
"flag_sequence_bug_compat",
"flag_map_or_seq", # '?' or 'M' or 'S'
"compares_by_identity_status?",
- 'needsdel',
+ 'hasuserdel',
'weakrefable',
'hasdict',
'layout',
@@ -160,7 +160,7 @@
w_self.bases_w = bases_w
w_self.dict_w = dict_w
w_self.hasdict = False
- w_self.needsdel = False
+ w_self.hasuserdel = False
w_self.weakrefable = False
w_self.w_doc = space.w_None
w_self.weak_subclasses = []
@@ -289,7 +289,7 @@
# compute a tuple that fully describes the instance layout
def get_full_instance_layout(w_self):
layout = w_self.layout
- return (layout, w_self.hasdict, w_self.needsdel, w_self.weakrefable)
+ return (layout, w_self.hasdict, w_self.weakrefable)
def compute_default_mro(w_self):
return compute_C3_mro(w_self.space, w_self)
@@ -986,7 +986,7 @@
hasoldstylebase = True
continue
w_self.hasdict = w_self.hasdict or w_base.hasdict
- w_self.needsdel = w_self.needsdel or w_base.needsdel
+ w_self.hasuserdel = w_self.hasuserdel or w_base.hasuserdel
w_self.weakrefable = w_self.weakrefable or w_base.weakrefable
return hasoldstylebase
@@ -1028,7 +1028,7 @@
if wantweakref:
create_weakref_slot(w_self)
if '__del__' in dict_w:
- w_self.needsdel = True
+ w_self.hasuserdel = True
#
if index_next_extra_slot == base_layout.nslots and not force_new_layout:
return base_layout
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -428,9 +428,11 @@
self._weakrefs = set()
self._queue = collections.deque()
+ def _already_registered(self, obj):
+ return hasattr(obj, '__enable_del_for_id')
+
def _untranslated_register_finalizer(self, obj):
- if hasattr(obj, '__enable_del_for_id'):
- return # already called
+ assert not self._already_registered(obj)
if not hasattr(self, '_queue'):
self._reset()
From pypy.commits at gmail.com Thu May 5 11:26:17 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 08:26:17 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: progress
Message-ID: <572b6619.41cec20a.5b41c.57ca@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84218:ea48aefe929c
Date: 2016-05-05 17:26 +0200
http://bitbucket.org/pypy/pypy/changeset/ea48aefe929c/
Log: progress
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -24,6 +24,8 @@
self.bases = bases
self.heaptype = False
self.hasdict = '__dict__' in rawdict
+ # no __del__: use an RPython _finalize_() method and register_finalizer
+ assert '__del__' not in rawdict
self.weakrefable = '__weakref__' in rawdict
self.doc = rawdict.pop('__doc__', None)
for base in bases:
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -44,13 +44,12 @@
self.bases_w = bases
self.w_dict = w_dict
+ def has_user_del(self, space):
+ return self.lookup(space, '__del__') is not None
+
def instantiate(self, space):
cache = space.fromcache(Cache)
- if self.lookup(space, '__del__') is not None:
- w_inst = cache.cls_with_del(space, self)
- else:
- w_inst = cache.cls_without_del(space, self)
- return w_inst
+ return cache.InstanceObjectCls(space, self)
def getdict(self, space):
return self.w_dict
@@ -132,9 +131,9 @@
self.setbases(space, w_value)
return
elif name == "__del__":
- if self.lookup(space, name) is None:
+ if not self.has_user_del(space):
msg = ("a __del__ method added to an existing class will "
- "not be called")
+ "only be called on instances made from now on")
space.warn(space.wrap(msg), space.w_RuntimeWarning)
space.setitem(self.w_dict, w_attr, w_value)
@@ -184,14 +183,10 @@
if hasattr(space, 'is_fake_objspace'):
# hack: with the fake objspace, we don't want to see typedef's
# _getusercls() at all
- self.cls_without_del = W_InstanceObject
- self.cls_with_del = W_InstanceObject
return
- self.cls_without_del = _getusercls(
- space, W_InstanceObject, False, reallywantdict=True)
- self.cls_with_del = _getusercls(
- space, W_InstanceObject, True, reallywantdict=True)
+ self.InstanceObjectCls = _getusercls(
+ space, W_InstanceObject, reallywantdict=True)
def class_descr_call(space, w_self, __args__):
@@ -297,12 +292,15 @@
class W_InstanceObject(W_Root):
def __init__(self, space, w_class):
# note that user_setup is overridden by the typedef.py machinery
+ self.space = space
self.user_setup(space, space.gettypeobject(self.typedef))
assert isinstance(w_class, W_ClassObject)
self.w_class = w_class
+ if w_class.has_user_del(space):
+ space.finalizer_queue.register_finalizer(self)
def user_setup(self, space, w_subtype):
- self.space = space
+ pass
def set_oldstyle_class(self, space, w_class):
if w_class is None or not isinstance(w_class, W_ClassObject):
@@ -368,8 +366,7 @@
self.set_oldstyle_class(space, w_value)
return
if name == '__del__' and w_meth is None:
- cache = space.fromcache(Cache)
- if (not isinstance(self, cache.cls_with_del)
+ if (not self.w_class.has_user_del(space)
and self.getdictvalue(space, '__del__') is None):
msg = ("a __del__ method added to an instance with no "
"__del__ in the class will not be called")
@@ -646,9 +643,8 @@
raise oefmt(space.w_TypeError, "instance has no next() method")
return space.call_function(w_func)
- def descr_del(self, space):
- # Note that this is called from executioncontext.UserDelAction
- # via the space.userdel() method.
+ def _finalize_(self):
+ space = self.space
w_func = self.getdictvalue(space, '__del__')
if w_func is None:
w_func = self.getattr_from_class(space, '__del__')
@@ -729,7 +725,6 @@
__pow__ = interp2app(W_InstanceObject.descr_pow),
__rpow__ = interp2app(W_InstanceObject.descr_rpow),
next = interp2app(W_InstanceObject.descr_next),
- __del__ = interp2app(W_InstanceObject.descr_del),
__exit__ = interp2app(W_InstanceObject.descr_exit),
__dict__ = dict_descr,
**rawdict
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -440,14 +440,16 @@
# Fetch and check the type of 'obj'
objtyp = obj.__class__
assert isinstance(objtyp, type), (
- "to run register_finalizer() untranslated, "
- "the object's class must be new-style")
+ "%r: to run register_finalizer() untranslated, "
+ "the object's class must be new-style" % (obj,))
assert hasattr(obj, '__dict__'), (
- "to run register_finalizer() untranslated, "
- "the object must have a __dict__")
- assert not hasattr(obj, '__slots__'), (
- "to run register_finalizer() untranslated, "
- "the object must not have __slots__")
+ "%r: to run register_finalizer() untranslated, "
+ "the object must have a __dict__" % (obj,))
+ assert (not hasattr(obj, '__slots__') or
+ type(obj).__slots__ == () or
+ type(obj).__slots__ == ('__weakref__',)), (
+ "%r: to run register_finalizer() untranslated, "
+ "the object must not have __slots__" % (obj,))
# The first time, patch the method __del__ of the class, if
# any, so that we can disable it on the original 'obj' and
From pypy.commits at gmail.com Thu May 5 11:27:09 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 08:27:09 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: fix
Message-ID: <572b664d.c61ec20a.b18a4.5e05@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84219:4ba4e4bb0c62
Date: 2016-05-05 17:27 +0200
http://bitbucket.org/pypy/pypy/changeset/4ba4e4bb0c62/
Log: fix
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -362,7 +362,7 @@
assert isinstance(instance, cls)
instance.user_setup(self, w_subtype)
if w_subtype.hasuserdel:
- space.finalizer_queue.register_finalizer(instance)
+ self.finalizer_queue.register_finalizer(instance)
else:
raise oefmt(self.w_TypeError,
"%N.__new__(%N): only for the type %N",
From pypy.commits at gmail.com Thu May 5 11:39:06 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 08:39:06 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Remove GeneratorIteratorWithDel
Message-ID: <572b691a.a553c20a.33b82.6178@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84220:e2d01729c202
Date: 2016-05-05 17:39 +0200
http://bitbucket.org/pypy/pypy/changeset/e2d01729c202/
Log: Remove GeneratorIteratorWithDel
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,6 +1,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.pyopcode import LoopBlock
+from pypy.interpreter.pycode import CO_YIELD_INSIDE_TRY
from rpython.rlib import jit
@@ -13,6 +14,8 @@
self.frame = frame # turned into None when frame_finished_execution
self.pycode = frame.pycode
self.running = False
+ if self.pycode.co_flags & CO_YIELD_INSIDE_TRY:
+ self.register_finalizer(self.space)
def descr__repr__(self, space):
if self.pycode is None:
@@ -139,7 +142,6 @@
def descr_close(self):
"""x.close(arg) -> raise GeneratorExit inside generator."""
- assert isinstance(self, GeneratorIterator)
space = self.space
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
@@ -212,25 +214,21 @@
unpack_into = _create_unpack_into()
unpack_into_w = _create_unpack_into()
-
-class GeneratorIteratorWithDel(GeneratorIterator):
-
- def __del__(self):
- # Only bother enqueuing self to raise an exception if the frame is
- # still not finished and finally or except blocks are present.
- self.clear_all_weakrefs()
+ def _finalize_(self):
+ # This is only called if the CO_YIELD_INSIDE_TRY flag is set
+ # on the code object. If the frame is still not finished and
+ # finally or except blocks are present at the current
+ # position, then raise a GeneratorExit. Otherwise, there is
+ # no point.
if self.frame is not None:
block = self.frame.lastblock
while block is not None:
if not isinstance(block, LoopBlock):
- self.enqueue_for_destruction(self.space,
- GeneratorIterator.descr_close,
- "interrupting generator of ")
+ self.descr_close()
break
block = block.previous
-
def get_printable_location_genentry(bytecode):
return '%s ' % (bytecode.get_repr(),)
generatorentry_driver = jit.JitDriver(greens=['pycode'],
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -241,12 +241,8 @@
def run(self):
"""Start this frame's execution."""
if self.getcode().co_flags & pycode.CO_GENERATOR:
- if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY:
- from pypy.interpreter.generator import GeneratorIteratorWithDel
- return self.space.wrap(GeneratorIteratorWithDel(self))
- else:
- from pypy.interpreter.generator import GeneratorIterator
- return self.space.wrap(GeneratorIterator(self))
+ from pypy.interpreter.generator import GeneratorIterator
+ return self.space.wrap(GeneratorIterator(self))
else:
return self.execute_frame()
From pypy.commits at gmail.com Thu May 5 11:45:43 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 08:45:43 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix test_typedef, and fix W_File
Message-ID: <572b6aa7.49961c0a.938e1.4a81@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84221:a594bdb4f9aa
Date: 2016-05-05 17:45 +0200
http://bitbucket.org/pypy/pypy/changeset/a594bdb4f9aa/
Log: Fix test_typedef, and fix W_File
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -186,35 +186,20 @@
class W_Level1(W_Root):
def __init__(self, space1):
assert space1 is space
- def __del__(self):
+ self.register_finalizer(space)
+ def _finalize_(self):
space.call_method(w_seen, 'append', space.wrap(1))
- class W_Level2(W_Root):
- def __init__(self, space1):
- assert space1 is space
- def __del__(self):
- self.enqueue_for_destruction(space, W_Level2.destructormeth,
- 'FOO ')
- def destructormeth(self):
- space.call_method(w_seen, 'append', space.wrap(2))
W_Level1.typedef = typedef.TypeDef(
'level1',
__new__ = typedef.generic_new_descr(W_Level1))
- W_Level2.typedef = typedef.TypeDef(
- 'level2',
- __new__ = typedef.generic_new_descr(W_Level2))
#
w_seen = space.newlist([])
W_Level1(space)
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [1]
- #
- w_seen = space.newlist([])
- W_Level2(space)
- gc.collect(); gc.collect()
assert space.str_w(space.repr(w_seen)) == "[]" # not called yet
ec = space.getexecutioncontext()
self.space.user_del_action.perform(ec, None)
- assert space.unwrap(w_seen) == [2]
+ assert space.unwrap(w_seen) == [1] # called by user_del_action
#
w_seen = space.newlist([])
self.space.appexec([self.space.gettypeobject(W_Level1.typedef)],
@@ -236,29 +221,17 @@
A4()
""")
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [4, 1]
+ assert space.unwrap(w_seen) == [4, 1] # user __del__, and _finalize_
#
w_seen = space.newlist([])
- self.space.appexec([self.space.gettypeobject(W_Level2.typedef)],
+ self.space.appexec([self.space.gettypeobject(W_Level1.typedef)],
"""(level2):
class A5(level2):
pass
A5()
""")
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [2]
- #
- w_seen = space.newlist([])
- self.space.appexec([self.space.gettypeobject(W_Level2.typedef),
- w_seen],
- """(level2, seen):
- class A6(level2):
- def __del__(self):
- seen.append(6)
- A6()
- """)
- gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [6, 2]
+ assert space.unwrap(w_seen) == [1] # _finalize_ only
def test_multiple_inheritance(self):
class W_A(W_Root):
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -44,21 +44,16 @@
def __init__(self, space):
self.space = space
- def __del__(self):
+ def _finalize_(self):
# assume that the file and stream objects are only visible in the
- # thread that runs __del__, so no race condition should be possible
- self.clear_all_weakrefs()
+ # thread that runs _finalize_, so no race condition should be
+ # possible and no locking is done here.
if self.stream is not None:
- self.enqueue_for_destruction(self.space, W_File.destructor,
- 'close() method of ')
-
- def destructor(self):
- assert isinstance(self, W_File)
- try:
- self.direct_close()
- except StreamErrors as e:
- operr = wrap_streamerror(self.space, e, self.w_name)
- raise operr
+ try:
+ self.direct_close()
+ except StreamErrors as e:
+ operr = wrap_streamerror(self.space, e, self.w_name)
+ raise operr
def fdopenstream(self, stream, fd, mode, w_name=None):
self.fd = fd
From pypy.commits at gmail.com Thu May 5 11:56:59 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 08:56:59 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: oops
Message-ID: <572b6d4b.143f1c0a.10891.4d6f@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84223:cb9315a55345
Date: 2016-05-05 17:57 +0200
http://bitbucket.org/pypy/pypy/changeset/cb9315a55345/
Log: oops
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -43,6 +43,7 @@
def __init__(self, space):
self.space = space
+ self.register_finalizer(space)
def _finalize_(self):
# assume that the file and stream objects are only visible in the
From pypy.commits at gmail.com Thu May 5 11:57:01 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 08:57:01 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: fix
Message-ID: <572b6d4d.c9b0c20a.621e.69cd@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84224:3d24694b2062
Date: 2016-05-05 17:57 +0200
http://bitbucket.org/pypy/pypy/changeset/3d24694b2062/
Log: fix
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py
--- a/pypy/module/_pickle_support/maker.py
+++ b/pypy/module/_pickle_support/maker.py
@@ -4,7 +4,7 @@
from pypy.interpreter.function import Function, Method
from pypy.interpreter.module import Module
from pypy.interpreter.pytraceback import PyTraceback
-from pypy.interpreter.generator import GeneratorIteratorWithDel
+from pypy.interpreter.generator import GeneratorIterator
from rpython.rlib.objectmodel import instantiate
from pypy.interpreter.gateway import unwrap_spec
from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject
@@ -59,7 +59,7 @@
return space.wrap(tb)
def generator_new(space):
- new_generator = instantiate(GeneratorIteratorWithDel)
+ new_generator = instantiate(GeneratorIterator)
return space.wrap(new_generator)
@unwrap_spec(current=int, remaining=int, step=int)
From pypy.commits at gmail.com Thu May 5 11:56:57 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 08:56:57 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Change repr
Message-ID: <572b6d49.161b1c0a.21c73.53da@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84222:12a84fcd4694
Date: 2016-05-05 17:57 +0200
http://bitbucket.org/pypy/pypy/changeset/12a84fcd4694/
Log: Change repr
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -564,7 +564,7 @@
try:
w_obj._finalize_()
except Exception as e:
- self._report_error(e, "internal finalizer of ", w_obj)
+ self._report_error(e, "finalizer of ", w_obj)
def make_finalizer_queue(W_Root, space):
From pypy.commits at gmail.com Thu May 5 12:15:58 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 09:15:58 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix _io
Message-ID: <572b71be.4ca51c0a.f2226.534a@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84226:bf03d5356e33
Date: 2016-05-05 18:16 +0200
http://bitbucket.org/pypy/pypy/changeset/bf03d5356e33/
Log: Fix _io
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -952,9 +952,15 @@
self.w_writer = None
raise
- def __del__(self):
- self.clear_all_weakrefs()
+ def _finalize_(self):
# Don't call the base __del__: do not close the files!
+ # Usually the _finalize_() method is not called at all because
+ # we set 'needs_to_finalize = False' in this class, so
+ # W_IOBase.__init__() won't call register_finalizer().
+ # However, this method might still be called: if the user
+ # makes an app-level subclass and adds a custom __del__.
+ pass
+ needs_to_finalize = False
# forward to reader
for method in ['read', 'peek', 'read1', 'readinto', 'readable']:
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -59,6 +59,8 @@
self.__IOBase_closed = False
if add_to_autoflusher:
get_autoflusher(space).add(self)
+ if self.needs_to_finalize:
+ self.register_finalizer(space)
def getdict(self, space):
return self.w_dict
@@ -71,13 +73,7 @@
return True
return False
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space, W_IOBase.destructor,
- 'internal __del__ of ')
-
- def destructor(self):
- assert isinstance(self, W_IOBase)
+ def _finalize_(self):
space = self.space
w_closed = space.findattr(self, space.wrap('closed'))
try:
@@ -90,6 +86,7 @@
# equally as bad, and potentially more frequent (because of
# shutdown issues).
pass
+ needs_to_finalize = True
def _CLOSED(self):
# Use this macro whenever you want to check the internal `closed`
From pypy.commits at gmail.com Thu May 5 14:15:46 2016
From: pypy.commits at gmail.com (rlamy)
Date: Thu, 05 May 2016 11:15:46 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Fix test_stressdict
Message-ID: <572b8dd2.c5381c0a.7ad43.ffff8830@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r84227:abe796ff4415
Date: 2016-05-05 19:14 +0100
http://bitbucket.org/pypy/pypy/changeset/abe796ff4415/
Log: Fix test_stressdict
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -534,11 +534,10 @@
if type(w_key) is self.space.UnicodeObjectCls:
self.switch_to_unicode_strategy(w_dict)
return
- if type(w_key) is W_IntObject:
+ w_type = self.space.type(w_key)
+ if self.space.is_w(w_type, self.space.w_int):
self.switch_to_int_strategy(w_dict)
- return
- w_type = self.space.type(w_key)
- if w_type.compares_by_identity():
+ elif w_type.compares_by_identity():
self.switch_to_identity_strategy(w_dict)
else:
self.switch_to_object_strategy(w_dict)
From pypy.commits at gmail.com Thu May 5 14:37:46 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Thu, 05 May 2016 11:37:46 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Startup fix, insert missing py3 opcodes
Message-ID: <572b92fa.a9a1c20a.a747e.ffff9f7f@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r84228:63776da5edf3
Date: 2016-05-05 20:36 +0200
http://bitbucket.org/pypy/pypy/changeset/63776da5edf3/
Log: Startup fix, insert missing py3 opcodes
diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py
--- a/lib-python/3/opcode.py
+++ b/lib-python/3/opcode.py
@@ -112,7 +112,8 @@
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
-def_op('WITH_CLEANUP', 81)
+def_op('WITH_CLEANUP_START', 81)
+def_op('WITH_CLEANUP_FINISH', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
@@ -198,6 +199,12 @@
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
+def_op('BUILD_LIST_UNPACK', 149)
+def_op('BUILD_MAP_UNPACK', 150)
+def_op('BUILD_MAP_UNPACK_WITH_CALL', 151)
+def_op('BUILD_TUPLE_UNPACK', 152)
+def_op('BUILD_SET_UNPACK', 153)
+
# pypy modification, experimental bytecode
def_op('LOOKUP_METHOD', 201) # Index in name list
hasname.append(201)
From pypy.commits at gmail.com Thu May 5 15:55:46 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 12:55:46 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Port _weakref:
enqueue_for_destruction() doesn't exist any more
Message-ID: <572ba542.21f9c20a.d72fa.ffffbea6@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84229:9fbfc373d95b
Date: 2016-05-05 21:55 +0200
http://bitbucket.org/pypy/pypy/changeset/9fbfc373d95b/
Log: Port _weakref: enqueue_for_destruction() doesn't exist any more
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -533,18 +533,6 @@
return
self._run_finalizers()
- def _report_error(self, e, where, w_obj):
- space = self.space
- if isinstance(e, OperationError):
- e.write_unraisable(space, where, w_obj)
- e.clear(space) # break up reference cycles
- else:
- addrstring = w_obj.getaddrstring(space)
- msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % (
- str(e), where, space.type(w_obj).name, addrstring))
- space.call_method(space.sys.get('stderr'), 'write',
- space.wrap(msg))
-
def _run_finalizers(self):
while True:
w_obj = self.space.finalizer_queue.next_dead()
@@ -558,13 +546,25 @@
try:
self.space.userdel(w_obj)
except Exception as e:
- self._report_error(e, "method __del__ of ", w_obj)
+ report_error(self.space, e, "method __del__ of ", w_obj)
# Call the RPython-level _finalize_() method.
try:
w_obj._finalize_()
except Exception as e:
- self._report_error(e, "finalizer of ", w_obj)
+ report_error(self.space, e, "finalizer of ", w_obj)
+
+
+def report_error(space, e, where, w_obj):
+ if isinstance(e, OperationError):
+ e.write_unraisable(space, where, w_obj)
+ e.clear(space) # break up reference cycles
+ else:
+ addrstring = w_obj.getaddrstring(space)
+ msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % (
+ str(e), where, space.type(w_obj).name, addrstring))
+ space.call_method(space.sys.get('stderr'), 'write',
+ space.wrap(msg))
def make_finalizer_queue(W_Root, space):
diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py
--- a/pypy/module/_weakref/interp__weakref.py
+++ b/pypy/module/_weakref/interp__weakref.py
@@ -3,7 +3,8 @@
from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import interp2app, ObjSpace
from pypy.interpreter.typedef import TypeDef
-from rpython.rlib import jit
+from pypy.interpreter.executioncontext import AsyncAction, report_error
+from rpython.rlib import jit, rgc
from rpython.rlib.rshrinklist import AbstractShrinkList
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rweakref import dead_ref
@@ -19,6 +20,7 @@
cached_weakref = None
cached_proxy = None
other_refs_weak = None
+ has_callbacks = False
def __init__(self, space):
self.space = space
@@ -99,31 +101,11 @@
return w_ref
return space.w_None
-
-class WeakrefLifelineWithCallbacks(WeakrefLifeline):
-
- def __init__(self, space, oldlifeline=None):
- self.space = space
- if oldlifeline is not None:
- self.cached_weakref = oldlifeline.cached_weakref
- self.cached_proxy = oldlifeline.cached_proxy
- self.other_refs_weak = oldlifeline.other_refs_weak
-
- def __del__(self):
- """This runs when the interp-level object goes away, and allows
- its lifeline to go away. The purpose of this is to activate the
- callbacks even if there is no __del__ method on the interp-level
- W_Root subclass implementing the object.
- """
- if self.other_refs_weak is None:
- return
- items = self.other_refs_weak.items()
- for i in range(len(items)-1, -1, -1):
- w_ref = items[i]()
- if w_ref is not None and w_ref.w_callable is not None:
- w_ref.enqueue_for_destruction(self.space,
- W_WeakrefBase.activate_callback,
- 'weakref callback of ')
+ def enable_callbacks(self):
+ if not self.has_callbacks:
+ fq = self.space.fromcache(Cache).fq
+ fq.register_finalizer(self)
+ self.has_callbacks = True
@jit.dont_look_inside
def make_weakref_with_callback(self, w_subtype, w_obj, w_callable):
@@ -131,6 +113,7 @@
w_ref = space.allocate_instance(W_Weakref, w_subtype)
W_Weakref.__init__(w_ref, space, w_obj, w_callable)
self.append_wref_to(w_ref)
+ self.enable_callbacks()
return w_ref
@jit.dont_look_inside
@@ -141,8 +124,44 @@
else:
w_proxy = W_Proxy(space, w_obj, w_callable)
self.append_wref_to(w_proxy)
+ self.enable_callbacks()
return w_proxy
+
+class WeakrefCallbackAction(AsyncAction):
+ """An action that runs when a W_Root object goes away, and allows
+ its lifeline to go away. It activates all the callbacks of all
+ the dying lifelines.
+ """
+
+ def perform(self, executioncontext, frame):
+ fq = self.space.fromcache(Cache).fq
+ while True:
+ lifeline = fq.next_dead()
+ if lifeline is None:
+ break
+ if lifeline.other_refs_weak is None:
+ continue # should never be the case, but better safe than sorry
+ items = lifeline.other_refs_weak.items()
+ for i in range(len(items)-1, -1, -1):
+ w_ref = items[i]()
+ if w_ref is not None and w_ref.w_callable is not None:
+ try:
+ w_ref.activate_callback()
+ except Exception as e:
+ report_error(self.space, e,
+ "weakref callback ", w_ref.w_callable)
+
+class Cache:
+ def __init__(self, space):
+ class WeakrefFinalizerQueue(rgc.FinalizerQueue):
+ Class = WeakrefLifeline
+ def finalizer_trigger(self):
+ space.weakref_callback_action.fire()
+ space.weakref_callback_action = WeakrefCallbackAction(space)
+ self.fq = WeakrefFinalizerQueue()
+
+
# ____________________________________________________________
@@ -163,7 +182,6 @@
self.w_obj_weak = dead_ref
def activate_callback(w_self):
- assert isinstance(w_self, W_WeakrefBase)
w_self.space.call_function(w_self.w_callable, w_self)
def descr__repr__(self, space):
@@ -227,32 +245,16 @@
w_obj.setweakref(space, lifeline)
return lifeline
-def getlifelinewithcallbacks(space, w_obj):
- lifeline = w_obj.getweakref()
- if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None
- oldlifeline = lifeline
- lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline)
- w_obj.setweakref(space, lifeline)
- return lifeline
-
-
-def get_or_make_weakref(space, w_subtype, w_obj):
- return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj)
-
-
-def make_weakref_with_callback(space, w_subtype, w_obj, w_callable):
- lifeline = getlifelinewithcallbacks(space, w_obj)
- return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable)
-
def descr__new__weakref(space, w_subtype, w_obj, w_callable=None,
__args__=None):
if __args__.arguments_w:
raise oefmt(space.w_TypeError, "__new__ expected at most 2 arguments")
+ lifeline = getlifeline(space, w_obj)
if space.is_none(w_callable):
- return get_or_make_weakref(space, w_subtype, w_obj)
+ return lifeline.get_or_make_weakref(w_subtype, w_obj)
else:
- return make_weakref_with_callback(space, w_subtype, w_obj, w_callable)
+ return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable)
W_Weakref.typedef = TypeDef("weakref",
__doc__ = """A weak reference to an object 'obj'. A 'callback' can be given,
@@ -308,23 +310,15 @@
return space.call_args(w_obj, __args__)
-def get_or_make_proxy(space, w_obj):
- return getlifeline(space, w_obj).get_or_make_proxy(w_obj)
-
-
-def make_proxy_with_callback(space, w_obj, w_callable):
- lifeline = getlifelinewithcallbacks(space, w_obj)
- return lifeline.make_proxy_with_callback(w_obj, w_callable)
-
-
def proxy(space, w_obj, w_callable=None):
"""Create a proxy object that weakly references 'obj'.
'callback', if given, is called with the proxy as an argument when 'obj'
is about to be finalized."""
+ lifeline = getlifeline(space, w_obj)
if space.is_none(w_callable):
- return get_or_make_proxy(space, w_obj)
+ return lifeline.get_or_make_proxy(w_obj)
else:
- return make_proxy_with_callback(space, w_obj, w_callable)
+ return lifeline.make_proxy_with_callback(w_obj, w_callable)
def descr__new__proxy(space, w_subtype, w_obj, w_callable=None):
raise oefmt(space.w_TypeError, "cannot create 'weakproxy' instances")
diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py
--- a/pypy/module/_weakref/test/test_weakref.py
+++ b/pypy/module/_weakref/test/test_weakref.py
@@ -1,6 +1,9 @@
class AppTestWeakref(object):
spaceconfig = dict(usemodules=('_weakref',))
-
+
+ def setup_class(cls):
+ cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
+
def test_simple(self):
import _weakref, gc
class A(object):
@@ -287,6 +290,9 @@
assert a1 is None
def test_del_and_callback_and_id(self):
+ if not self.runappdirect:
+ skip("the id() doesn't work correctly in __del__ and "
+ "callbacks before translation")
import gc, weakref
seen_del = []
class A(object):
From pypy.commits at gmail.com Thu May 5 18:02:27 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 05 May 2016 15:02:27 -0700 (PDT)
Subject: [pypy-commit] pypy.org extradoc: update hashes for correct source
tarballs
Message-ID: <572bc2f3.09ad1c0a.4edca.5294@mx.google.com>
Author: Matti Picus
Branch: extradoc
Changeset: r745:dbb2b880c58a
Date: 2016-05-06 01:02 +0300
http://bitbucket.org/pypy/pypy.org/changeset/dbb2b880c58a/
Log: update hashes for correct source tarballs
diff --git a/download.html b/download.html
--- a/download.html
+++ b/download.html
@@ -385,8 +385,8 @@
224e4d5870d88fb444d8f4f1791140e5 pypy-5.1.1-linux.tar.bz2
e35510b39e34f1c2199c283bf8655e5c pypy-5.1.1-osx64.tar.bz2
9d8b82448416e0203efa325364f759e8 pypy-5.1.1-s390x.tar.bz2
-7aff685c28941fda6a74863c53931e38 pypy-5.1.1-src.tar.bz2
-ee9795d8638d34126ca24e4757a73056 pypy-5.1.1-src.zip
+8c2630896178e650e593686ddae625ac pypy-5.1.1-src.tar.bz2
+f70ee6096d567c549a2bf11484bfbd0b pypy-5.1.1-src.zip
d70b4385fbf0a5e5260f6b7bedb231d4 pypy-5.1.1-win32.zip
pypy-5.1.0 md5:
@@ -428,8 +428,8 @@
6767056bb71081bce8fcee04de0d0be02d71d4f9 pypy-5.1.1-linux.tar.bz2
734eb82489d57a3b2b55d6b83153b3972dc6781d pypy-5.1.1-osx64.tar.bz2
2440d613430f9dfc57bc8db5cfd087f1169ee2d0 pypy-5.1.1-s390x.tar.bz2
-34eca157e025e65f9dc1f419fa56ce31ad635e9c pypy-5.1.1-src.tar.bz2
-95596b62cf2bb6ebd4939584040e713ceec9ef0a pypy-5.1.1-src.zip
+830e0a2c43c518b8c2b33f4ae40ac72b25e6da02 pypy-5.1.1-src.tar.bz2
+bf4826218579f7339acfb70fa0e6107d3527b095 pypy-5.1.1-src.zip
3694e37c1cf6a2a938c108ee69126e4f40a0886e pypy-5.1.1-win32.zip
pypy-5.1.0 sha1:
@@ -454,8 +454,8 @@
7951fd2b87c9e621ec57c932c20da2b8a4a9e87d8daeb9e2b7373f9444219abc pypy-5.1.1-linux.tar.bz2
fe2bbb7cf95eb91b1724029f81e85d1dbb6025a2e9a005cfe7258fe07602f771 pypy-5.1.1-osx64.tar.bz2
4acd1066e07eb668665b302bf8e9338b6df136082c5ce28c62b70c6bb1b5cf9f pypy-5.1.1-s390x.tar.bz2
-99aff0c710c46903b821c7c436f9cb9de16bd7370d923f99cc7c28a66be6c5b2 pypy-5.1.1-src.tar.bz2
-7c0c5157e7977674aa942de3c20ff0567f7af986824f6674e2424f6089c41501 pypy-5.1.1-src.zip
+ca3d943d7fbd78bb957ee9e5833ada4bb8506ac99a41b7628790e286a65ed2be pypy-5.1.1-src.tar.bz2
+cdcc967da36cde5586839cc631ef0d9123e19d3ce71ccfba03c68ac887374884 pypy-5.1.1-src.zip
22a780e328ef053e098f2edc2302957ac3119adf7bf11ff23e225931806e7bcd pypy-5.1.1-win32.zip
pypy-5.1.0 sha256:
diff --git a/source/download.txt b/source/download.txt
--- a/source/download.txt
+++ b/source/download.txt
@@ -421,8 +421,8 @@
224e4d5870d88fb444d8f4f1791140e5 pypy-5.1.1-linux.tar.bz2
e35510b39e34f1c2199c283bf8655e5c pypy-5.1.1-osx64.tar.bz2
9d8b82448416e0203efa325364f759e8 pypy-5.1.1-s390x.tar.bz2
- 7aff685c28941fda6a74863c53931e38 pypy-5.1.1-src.tar.bz2
- ee9795d8638d34126ca24e4757a73056 pypy-5.1.1-src.zip
+ 8c2630896178e650e593686ddae625ac pypy-5.1.1-src.tar.bz2
+ f70ee6096d567c549a2bf11484bfbd0b pypy-5.1.1-src.zip
d70b4385fbf0a5e5260f6b7bedb231d4 pypy-5.1.1-win32.zip
pypy-5.1.0 md5::
@@ -466,8 +466,8 @@
6767056bb71081bce8fcee04de0d0be02d71d4f9 pypy-5.1.1-linux.tar.bz2
734eb82489d57a3b2b55d6b83153b3972dc6781d pypy-5.1.1-osx64.tar.bz2
2440d613430f9dfc57bc8db5cfd087f1169ee2d0 pypy-5.1.1-s390x.tar.bz2
- 34eca157e025e65f9dc1f419fa56ce31ad635e9c pypy-5.1.1-src.tar.bz2
- 95596b62cf2bb6ebd4939584040e713ceec9ef0a pypy-5.1.1-src.zip
+ 830e0a2c43c518b8c2b33f4ae40ac72b25e6da02 pypy-5.1.1-src.tar.bz2
+ bf4826218579f7339acfb70fa0e6107d3527b095 pypy-5.1.1-src.zip
3694e37c1cf6a2a938c108ee69126e4f40a0886e pypy-5.1.1-win32.zip
pypy-5.1.0 sha1::
@@ -492,8 +492,8 @@
7951fd2b87c9e621ec57c932c20da2b8a4a9e87d8daeb9e2b7373f9444219abc pypy-5.1.1-linux.tar.bz2
fe2bbb7cf95eb91b1724029f81e85d1dbb6025a2e9a005cfe7258fe07602f771 pypy-5.1.1-osx64.tar.bz2
4acd1066e07eb668665b302bf8e9338b6df136082c5ce28c62b70c6bb1b5cf9f pypy-5.1.1-s390x.tar.bz2
- 99aff0c710c46903b821c7c436f9cb9de16bd7370d923f99cc7c28a66be6c5b2 pypy-5.1.1-src.tar.bz2
- 7c0c5157e7977674aa942de3c20ff0567f7af986824f6674e2424f6089c41501 pypy-5.1.1-src.zip
+ ca3d943d7fbd78bb957ee9e5833ada4bb8506ac99a41b7628790e286a65ed2be pypy-5.1.1-src.tar.bz2
+ cdcc967da36cde5586839cc631ef0d9123e19d3ce71ccfba03c68ac887374884 pypy-5.1.1-src.zip
22a780e328ef053e098f2edc2302957ac3119adf7bf11ff23e225931806e7bcd pypy-5.1.1-win32.zip
pypy-5.1.0 sha256::
From pypy.commits at gmail.com Thu May 5 18:08:18 2016
From: pypy.commits at gmail.com (Sergey Matyunin)
Date: Thu, 05 May 2016 15:08:18 -0700 (PDT)
Subject: [pypy-commit] pypy numpy_broadcast_nd: Fixed nested call of
numpy.broadcast
Message-ID: <572bc452.0f801c0a.8f688.ffffcce4@mx.google.com>
Author: Sergey Matyunin
Branch: numpy_broadcast_nd
Changeset: r84230:b58fe1445add
Date: 2016-05-01 18:19 +0200
http://bitbucket.org/pypy/pypy/changeset/b58fe1445add/
Log: Fixed nested call of numpy.broadcast
diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py
--- a/pypy/module/micronumpy/broadcast.py
+++ b/pypy/module/micronumpy/broadcast.py
@@ -37,11 +37,22 @@
except OverflowError as e:
raise oefmt(space.w_ValueError, "broadcast dimensions too large.")
- self.list_iter_state = [W_FlatIterator(arr, self.shape, arr.get_order() != self.order)
- for arr in self.seq]
+ self.list_iter_state = self._prepare_iterators()
self.done = False
+ def _prepare_iterators(self):
+ res = []
+ for arr in self.seq:
+ if isinstance(arr, W_Broadcast):
+ res.extend([self._create_iterator(it.base) for it in arr.list_iter_state])
+ else:
+ res.append(self._create_iterator(arr))
+ return res
+
+ def _create_iterator(self, arr):
+ return W_FlatIterator(arr, self.shape, arr.get_order() != self.order)
+
def get_shape(self):
return self.shape
@@ -49,10 +60,17 @@
return self.order
def get_dtype(self):
- return self.seq[0].get_dtype() #XXX Fixme
+ return self.seq[0].get_dtype() # XXX Fixme
def get_size(self):
- return 0 #XXX Fixme
+ return self.size
+
+ def is_scalar(self):
+ return self.ndims() == 0
+
+ def ndims(self):
+ return len(self.get_shape())
+ ndims._always_inline_ = True
def create_iter(self, shape=None, backward_broadcast=False):
return self, self.list_iter_state # XXX Fixme
From pypy.commits at gmail.com Thu May 5 18:08:20 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 05 May 2016 15:08:20 -0700 (PDT)
Subject: [pypy-commit] pypy numpy_broadcast_nd: merge heads
Message-ID: <572bc454.e873c20a.284cd.ffffe421@mx.google.com>
Author: Matti Picus
Branch: numpy_broadcast_nd
Changeset: r84231:850303b78179
Date: 2016-05-05 18:04 +0300
http://bitbucket.org/pypy/pypy/changeset/850303b78179/
Log: merge heads
From pypy.commits at gmail.com Thu May 5 18:08:22 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 05 May 2016 15:08:22 -0700 (PDT)
Subject: [pypy-commit] pypy default: 5.1.1 uses a rev number in the tag name,
fixes issue #2292
Message-ID: <572bc456.d81a1c0a.33af1.ffffcb91@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84232:6ab68565ea4e
Date: 2016-05-06 01:06 +0300
http://bitbucket.org/pypy/pypy/changeset/6ab68565ea4e/
Log: 5.1.1 uses a rev number in the tag name, fixes issue #2292
diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
--- a/pypy/tool/release/repackage.sh
+++ b/pypy/tool/release/repackage.sh
@@ -3,7 +3,7 @@
min=1
rev=1
branchname=release-$maj.x # ==OR== release-$maj.$min.x
-tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev
+tagname=release-$maj.$min.$rev # ==OR== release-$maj.$min
hg log -r $branchname || exit 1
hg log -r $tagname || exit 1
From pypy.commits at gmail.com Thu May 5 19:46:30 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Thu, 05 May 2016 16:46:30 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Merged in marky1991/pypy_new/py3k (pull
request #442)
Message-ID: <572bdb56.45271c0a.d8568.ffffe599@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84234:a818809e80f2
Date: 2016-05-05 16:45 -0700
http://bitbucket.org/pypy/pypy/changeset/a818809e80f2/
Log: Merged in marky1991/pypy_new/py3k (pull request #442)
py3k Deque Fix
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -529,10 +529,15 @@
self.index = ri
return w_x
+ def reduce(self):
+ return self.space.newtuple([self.space.gettypefor(W_DequeIter),
+ self.space.newtuple([self.deque])])
+
W_DequeIter.typedef = TypeDef("_collections.deque_iterator",
__iter__ = interp2app(W_DequeIter.iter),
__length_hint__ = interp2app(W_DequeIter.length),
__next__ = interp2app(W_DequeIter.next),
+ __reduce__ = interp2app(W_DequeIter.reduce)
)
W_DequeIter.typedef.acceptable_as_base_class = False
From pypy.commits at gmail.com Thu May 5 19:46:46 2016
From: pypy.commits at gmail.com (marky1991)
Date: Thu, 05 May 2016 16:46:46 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Move deque fixes to py3k so I don't have
to merge with upstream again.
Message-ID: <572bdb66.d72d1c0a.4dc63.ffffe459@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r84233:a01ca77166d6
Date: 2016-05-03 14:07 -0400
http://bitbucket.org/pypy/pypy/changeset/a01ca77166d6/
Log: Move deque fixes to py3k so I don't have to merge with upstream
again.
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -529,10 +529,15 @@
self.index = ri
return w_x
+ def reduce(self):
+ return self.space.newtuple([self.space.gettypefor(W_DequeIter),
+ self.space.newtuple([self.deque])])
+
W_DequeIter.typedef = TypeDef("_collections.deque_iterator",
__iter__ = interp2app(W_DequeIter.iter),
__length_hint__ = interp2app(W_DequeIter.length),
__next__ = interp2app(W_DequeIter.next),
+ __reduce__ = interp2app(W_DequeIter.reduce)
)
W_DequeIter.typedef.acceptable_as_base_class = False
From pypy.commits at gmail.com Thu May 5 20:36:51 2016
From: pypy.commits at gmail.com (william_ml_leslie)
Date: Thu, 05 May 2016 17:36:51 -0700 (PDT)
Subject: [pypy-commit] pypy verbose-imports: Print banner to stderr. Print
banner if verbose.
Message-ID: <572be723.923f1c0a.5b0e0.ffffe4c0@mx.google.com>
Author: William ML Leslie
Branch: verbose-imports
Changeset: r84235:847363e88c35
Date: 2016-05-06 10:35 +1000
http://bitbucket.org/pypy/pypy/changeset/847363e88c35/
Log: Print banner to stderr. Print banner if verbose.
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -527,6 +527,7 @@
warnoptions,
unbuffered,
ignore_environment,
+ verbose,
**ignored):
# with PyPy in top of CPython we can only have around 100
# but we need more in the translated PyPy for the compiler package
@@ -661,6 +662,8 @@
inspect = True
else:
# If not interactive, just read and execute stdin normally.
+ if verbose:
+ print_banner(not no_site)
@hidden_applevel
def run_it():
co_stdin = compile(sys.stdin.read(), '', 'exec',
@@ -722,10 +725,10 @@
return status
def print_banner(copyright):
- print 'Python %s on %s' % (sys.version, sys.platform)
+ print >> sys.stderr, 'Python %s on %s' % (sys.version, sys.platform)
if copyright:
- print ('Type "help", "copyright", "credits" or '
- '"license" for more information.')
+ print >> sys.stderr, ('Type "help", "copyright", "credits" or '
+ '"license" for more information.')
STDLIB_WARNING = """\
debug: WARNING: Library path not found, using compiled-in sys.path.
From pypy.commits at gmail.com Thu May 5 22:42:00 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Thu, 05 May 2016 19:42:00 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: match cpython's frozen importlib name
(it's still exposed in some cases)
Message-ID: <572c0478.821b1c0a.6ecc2.ffffb9e2@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84237:ca2390b5a5cc
Date: 2016-05-05 19:40 -0700
http://bitbucket.org/pypy/pypy/changeset/ca2390b5a5cc/
Log: match cpython's frozen importlib name (it's still exposed in some
cases)
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -188,7 +188,8 @@
# /lastdirname/basename.py
# instead of freezing the complete translation-time path.
filename = self.co_filename
- if filename.startswith(''):
+ if (filename.startswith('') or
+ filename == ''):
return
filename = filename.lstrip('<').rstrip('>')
if filename.lower().endswith('.pyc'):
diff --git a/pypy/module/_frozen_importlib/interp_import.py b/pypy/module/_frozen_importlib/interp_import.py
--- a/pypy/module/_frozen_importlib/interp_import.py
+++ b/pypy/module/_frozen_importlib/interp_import.py
@@ -7,8 +7,7 @@
space.getbuiltinmodule('_frozen_importlib').getdictvalue(
space, '__import__'), __args__)
except OperationError as e:
- e.remove_traceback_module_frames(
- '/frozen importlib._bootstrap')
+ e.remove_traceback_module_frames('')
raise
import_with_frames_removed = interp2app(import_with_frames_removed,
app_name='__import__')
From pypy.commits at gmail.com Fri May 6 01:47:45 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 22:47:45 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: fix cppyy (probably)
Message-ID: <572c3001.0f801c0a.8f688.2609@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84238:e2f8c467ca51
Date: 2016-05-06 07:42 +0200
http://bitbucket.org/pypy/pypy/changeset/e2f8c467ca51/
Log: fix cppyy (probably)
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -1020,9 +1020,12 @@
class W_CPPInstance(W_Root):
- _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns']
+ _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns',
+ 'finalizer_registered']
_immutable_fields_ = ["cppclass", "isref"]
+ finalizer_registered = False
+
def __init__(self, space, cppclass, rawobject, isref, python_owns):
self.space = space
self.cppclass = cppclass
@@ -1032,6 +1035,12 @@
assert not isref or not python_owns
self.isref = isref
self.python_owns = python_owns
+ self._opt_register_finalizer()
+
+ def _opt_register_finalizer(self):
+ if self.python_owns and not self.finalizer_registered:
+ self.register_finalizer(self.space)
+ self.finalizer_registered = True
def _nullcheck(self):
if not self._rawobject or (self.isref and not self.get_rawobject()):
@@ -1045,6 +1054,7 @@
@unwrap_spec(value=bool)
def fset_python_owns(self, space, value):
self.python_owns = space.is_true(value)
+ self._opt_register_finalizer()
def get_cppthis(self, calling_scope):
return self.cppclass.get_cppthis(self, calling_scope)
@@ -1143,16 +1153,14 @@
(self.cppclass.name, rffi.cast(rffi.ULONG, self.get_rawobject())))
def destruct(self):
- assert isinstance(self, W_CPPInstance)
if self._rawobject and not self.isref:
memory_regulator.unregister(self)
capi.c_destruct(self.space, self.cppclass, self._rawobject)
self._rawobject = capi.C_NULL_OBJECT
- def __del__(self):
+ def _finalize_(self):
if self.python_owns:
- self.enqueue_for_destruction(self.space, W_CPPInstance.destruct,
- '__del__() method of ')
+ self.destruct()
W_CPPInstance.typedef = TypeDef(
'CPPInstance',
From pypy.commits at gmail.com Fri May 6 01:47:47 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 22:47:47 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix _ssl (probably)
Message-ID: <572c3003.d5da1c0a.f066.26d1@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84239:8e99fd479ff1
Date: 2016-05-06 07:48 +0200
http://bitbucket.org/pypy/pypy/changeset/8e99fd479ff1/
Log: Fix _ssl (probably)
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -278,6 +278,8 @@
sock_fd = space.int_w(space.call_method(w_sock, "fileno"))
self.ssl = libssl_SSL_new(w_ctx.ctx) # new ssl struct
+ self.register_finalizer(space)
+
index = compute_unique_id(self)
libssl_SSL_set_app_data(self.ssl, rffi.cast(rffi.VOIDP, index))
SOCKET_STORAGE.set(index, self)
@@ -317,12 +319,7 @@
self.ssl_sock_weakref_w = None
return self
- def __del__(self):
- self.enqueue_for_destruction(self.space, _SSLSocket.destructor,
- '__del__() method of ')
-
- def destructor(self):
- assert isinstance(self, _SSLSocket)
+ def _finalize_(self):
if self.peer_cert:
libssl_X509_free(self.peer_cert)
if self.ssl:
@@ -1285,6 +1282,7 @@
self = space.allocate_instance(_SSLContext, w_subtype)
self.ctx = ctx
self.check_hostname = False
+ self.register_finalizer(space)
options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
if protocol != PY_SSL_VERSION_SSL2:
options |= SSL_OP_NO_SSLv2
@@ -1308,7 +1306,7 @@
return self
- def __del__(self):
+ def _finalize_(self):
libssl_SSL_CTX_free(self.ctx)
@unwrap_spec(server_side=int)
From pypy.commits at gmail.com Fri May 6 02:26:14 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 05 May 2016 23:26:14 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-werror: merge default into branch
Message-ID: <572c3906.47afc20a.a55a6.4be6@mx.google.com>
Author: Matti Picus
Branch: cpyext-werror
Changeset: r84240:107db893f9d8
Date: 2016-05-06 08:55 +0300
http://bitbucket.org/pypy/pypy/changeset/107db893f9d8/
Log: merge default into branch
diff too long, truncating to 2000 out of 22221 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -21,3 +21,4 @@
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/TODO b/TODO
deleted file mode 100644
--- a/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-* reduce size of generated c code from slot definitions in slotdefs.
-* remove broken DEBUG_REFCOUNT from pyobject.py
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "next" : stop}),
- ("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
("__format__", format, format_impl, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
+ if test_support.check_impl_detail():
+ specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
raise MyException
for name, runner, meth_impl, ok, env in specials:
- if name == '__length_hint__' or name == '__sizeof__':
- if not test_support.check_impl_detail():
- continue
-
class X(Checker):
pass
for attr, obj in env.iteritems():
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,123 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``. To
+make it possible, the RPython interface is now the following one (from
+May 2016):
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``. These are called
+ immediately by the GC when the last reference to the object goes
+ away, like in CPython. However, the long-term goal is that all
+ ``__del__()`` methods should only contain simple enough code. If
+ they do, we call them "destructors". They can't use operations that
+ would resurrect the object, for example. Use the decorator
+ ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` that are not passing the destructor test
+ are supported for backward compatibility, but deprecated. The rest
+ of this document assumes that ``__del__()`` are all destructors.
+
+* For any more advanced usage --- in particular for any app-level
+ object with a __del__ --- we don't use the RPython-level
+ ``__del__()`` method. Instead we use
+ ``rgc.FinalizerController.register_finalizer()``. This allows us to
+ attach a finalizer method to the object, giving more control over
+ the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer. A finalizer runs earlier, and in topological
+order; care must be taken that the object might still be reachable at
+this point if we're clever enough. A destructor on the other hand runs
+last; nothing can be done with the object any more.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when there is no more reference to an object. Intended for
+objects that just need to free a block of raw memory or close a file.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it. These restrictions are
+checked. In particular you cannot access fields containing GC objects;
+and if you call an external C function, it must be a "safe" function
+(e.g. not releasing the GIL; use ``releasegil=False`` in
+``rffi.llexternal()``).
+
+If there are several objects with destructors that die during the same
+GC cycle, they are called in a completely random order --- but that
+should not matter because destructors cannot do much anyway.
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerQueue`` class::
+
+* You must give a class-level attribute ``base_class``, which is the
+ base class of all instances with a finalizer. (If you need
+ finalizers on several unrelated classes, you need several unrelated
+ ``FinalizerQueue`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``. At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer. Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and mark them as
+reachable again, as well as all objects they depend on. It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance. Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes). If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly. It
+returns the next queued item, or ``None`` when the queue is empty.
+
+It is allowed in theory to cumulate several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues. This is not tested though.
+
+
+Ordering of finalizers
+----------------------
+
+After a collection, the MiniMark GC should call the finalizers on
*some* of the objects that have one and that have become unreachable.
Basically, if there is a reference chain from an object a to an object b
then it should not call the finalizer for b immediately, but just keep b
alive and try again to call its finalizer after the next collection.
-This basic idea fails when there are cycles. It's not a good idea to
+(Note that this creates rare but annoying issues as soon as the program
+creates chains of objects with finalizers more quickly than the rate at
+which major collections go (which is very slow). In August 2013 we tried
+instead to call all finalizers of all objects found unreachable at a major
+collection. That branch, ``gc-del``, was never merged. It is still
+unclear what the real consequences would be on programs in the wild.)
+
+The basic idea fails in the presence of cycles. It's not a good idea to
keep the objects alive forever or to never call any of the finalizers.
The model we came up with is that in this case, we could just call the
finalizer of one of the objects in the cycle -- but only, of course, if
@@ -33,6 +137,7 @@
detach the finalizer (so that it's not called more than once)
call the finalizer
+
Algorithm
---------
@@ -136,28 +241,8 @@
that doesn't change the state of an object, we don't follow its children
recursively.
-In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode
-the 4 states with a single extra bit in the header:
-
- ===== ============= ======== ====================
- state is_forwarded? bit set? bit set in the copy?
- ===== ============= ======== ====================
- 0 no no n/a
- 1 no yes n/a
- 2 yes yes yes
- 3 yes whatever no
- ===== ============= ======== ====================
-
-So the loop above that does the transition from state 1 to state 2 is
-really just a copy(x) followed by scan_copied(). We must also clear the
-bit in the copy at the end, to clean up before the next collection
-(which means recursively bumping the state from 2 to 3 in the final
-loop).
-
-In the MiniMark GC, the objects don't move (apart from when they are
-copied out of the nursery), but we use the flag GCFLAG_VISITED to mark
-objects that survive, so we can also have a single extra bit for
-finalizers:
+In practice, in the MiniMark GCs, we can encode
+the 4 states with a combination of two bits in the header:
===== ============== ============================
state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING
@@ -167,3 +252,8 @@
2 yes yes
3 yes no
===== ============== ============================
+
+So the loop above that does the transition from state 1 to state 2 is
+really just a recursive visit. We must also clear the
+FINALIZATION_ORDERING bit at the end (state 2 to state 3) to clean up
+before the next collection.
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.1.1.rst
release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released a bugfix for PyPy 5.1, due to a regression_ in
+installing third-party packages dependent on numpy (using our numpy fork
+available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy which increased the memory required to translate. Improvement
+will be noticed by downstream packagers and those who translate rather than
+download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
help="output format")
options, args = parser.parse_args()
if len(args) != 1:
- raise ValueError, "need exactly one argument"
+ raise ValueError("need exactly one argument")
epsfile = process_dot(py.path.local(args[0]))
if options.format == "ps" or options.format == "eps":
print epsfile.read()
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -45,7 +45,26 @@
- improve tracking of PyObject to rpython object mapping
- support tp_as_{number, sequence, mapping, buffer} slots
+(makes the pypy-c bigger; this was fixed subsequently by the
+share-cpyext-cpython-api branch)
+
.. branch: share-mapdict-methods-2
Reduce generated code for subclasses by using the same function objects in all
generated subclasses.
+
+.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL. Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
+
+.. branch: cpyext-test-A
+
+Get the cpyext tests to pass with "-A" (i.e. when tested directly with
+CPython).
+
+.. branch: oefmt
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -63,7 +63,7 @@
## from pypy.interpreter import main, interactive, error
## con = interactive.PyPyConsole(space)
## con.interact()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -71,7 +71,7 @@
finally:
try:
space.finish()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -115,7 +115,7 @@
space.wrap('__import__'))
space.call_function(import_, space.wrap('site'))
return rffi.cast(rffi.INT, 0)
- except OperationError, e:
+ except OperationError as e:
if verbose:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
@@ -167,7 +167,7 @@
sys._pypy_execute_source.append(glob)
exec stmt in glob
""")
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -78,7 +78,11 @@
"""
try:
# run it
- f(*fargs, **fkwds)
+ try:
+ f(*fargs, **fkwds)
+ finally:
+ sys.settrace(None)
+ sys.setprofile(None)
# we arrive here if no exception is raised. stdout cosmetics...
try:
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -84,7 +84,7 @@
space = self.space
try:
args_w = space.fixedview(w_stararg)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"argument after * must be a sequence, not %T",
@@ -111,7 +111,7 @@
else:
try:
w_keys = space.call_method(w_starstararg, "keys")
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
raise oefmt(space.w_TypeError,
"argument after ** must be a mapping, not %T",
@@ -134,11 +134,11 @@
"""The simplest argument parsing: get the 'argcount' arguments,
or raise a real ValueError if the length is wrong."""
if self.keywords:
- raise ValueError, "no keyword arguments expected"
+ raise ValueError("no keyword arguments expected")
if len(self.arguments_w) > argcount:
- raise ValueError, "too many arguments (%d expected)" % argcount
+ raise ValueError("too many arguments (%d expected)" % argcount)
elif len(self.arguments_w) < argcount:
- raise ValueError, "not enough arguments (%d expected)" % argcount
+ raise ValueError("not enough arguments (%d expected)" % argcount)
return self.arguments_w
def firstarg(self):
@@ -279,7 +279,7 @@
try:
self._match_signature(w_firstarg,
scope_w, signature, defaults_w, 0)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
return signature.scope_length()
@@ -301,7 +301,7 @@
"""
try:
return self._parse(w_firstarg, signature, defaults_w, blindargs)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
@staticmethod
@@ -352,11 +352,9 @@
for w_key in keys_w:
try:
key = space.str_w(w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("keywords must be strings"))
+ raise oefmt(space.w_TypeError, "keywords must be strings")
if e.match(space, space.w_UnicodeEncodeError):
# Allow this to pass through
key = None
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -16,8 +16,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -115,16 +115,16 @@
def check_forbidden_name(self, name, node):
try:
misc.check_forbidden_name(name)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error("cannot assign to %s" % (e.name,), node)
def set_context(self, expr, ctx):
"""Set the context of an expression to Store or Del if possible."""
try:
expr.set_context(ctx)
- except ast.UnacceptableExpressionContext, e:
+ except ast.UnacceptableExpressionContext as e:
self.error_ast(e.msg, e.node)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_print_stmt(self, print_node):
@@ -1080,7 +1080,7 @@
return self.space.call_function(tp, w_num_str)
try:
return self.space.call_function(self.space.w_int, w_num_str, w_base)
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(self.space, self.space.w_ValueError):
raise
return self.space.call_function(self.space.w_float, w_num_str)
@@ -1100,7 +1100,7 @@
sub_strings_w = [parsestring.parsestr(space, encoding, atom_node.get_child(i).get_value(),
unicode_literals)
for i in range(atom_node.num_children())]
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(space, space.w_UnicodeError):
raise
# UnicodeError in literal: turn into SyntaxError
diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py
--- a/pypy/interpreter/astcompiler/symtable.py
+++ b/pypy/interpreter/astcompiler/symtable.py
@@ -325,7 +325,7 @@
try:
module.walkabout(self)
top.finalize(None, {}, {})
- except SyntaxError, e:
+ except SyntaxError as e:
e.filename = compile_info.filename
raise
self.pop_scope()
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -705,7 +705,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
@@ -717,7 +717,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
@@ -969,7 +969,7 @@
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
- except AssertionError, e:
+ except AssertionError as e:
assert str(e) == "(3,)"
# BUILD_LIST_FROM_ARG is PyPy specific
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -96,7 +96,7 @@
def t_default(self, s):
r" . +"
- raise ValueError, "unmatched input: %s" % `s`
+ raise ValueError("unmatched input: %s" % `s`)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
@@ -377,7 +377,7 @@
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
- except ASDLSyntaxError, err:
+ except ASDLSyntaxError as err:
print err
lines = buf.split("\n")
print lines[err.lineno - 1] # lines starts at 0, files at 1
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -399,8 +399,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -52,7 +52,7 @@
try:
space.delitem(w_dict, space.wrap(attr))
return True
- except OperationError, ex:
+ except OperationError as ex:
if not ex.match(space, space.w_KeyError):
raise
return False
@@ -67,8 +67,8 @@
return space.gettypeobject(self.typedef)
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("__class__ assignment: only for heap types"))
+ raise oefmt(space.w_TypeError,
+ "__class__ assignment: only for heap types")
def user_setup(self, space, w_subtype):
raise NotImplementedError("only for interp-level user subclasses "
@@ -77,7 +77,7 @@
def getname(self, space):
try:
return space.str_w(space.getattr(self, space.wrap('__name__')))
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError):
return '?'
raise
@@ -318,7 +318,7 @@
space = self.space
try:
return space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise StopIteration
@@ -406,7 +406,7 @@
self.sys.get('builtin_module_names')):
try:
w_mod = self.getitem(w_modules, w_modname)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
continue
raise
@@ -440,7 +440,7 @@
try:
self.call_method(w_mod, "_shutdown")
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self, "threading._shutdown()")
def __repr__(self):
@@ -476,7 +476,7 @@
assert reuse
try:
return self.getitem(w_modules, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_KeyError):
raise
@@ -706,8 +706,7 @@
try:
return rthread.allocate_lock()
except rthread.error:
- raise OperationError(self.w_RuntimeError,
- self.wrap("out of resources"))
+ raise oefmt(self.w_RuntimeError, "out of resources")
# Following is a friendly interface to common object space operations
# that can be defined in term of more primitive ones. Subclasses
@@ -764,7 +763,7 @@
def finditem(self, w_obj, w_key):
try:
return self.getitem(w_obj, w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
return None
raise
@@ -772,7 +771,7 @@
def findattr(self, w_object, w_name):
try:
return self.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
# a PyPy extension: let SystemExit and KeyboardInterrupt go through
if e.async(self):
raise
@@ -872,7 +871,7 @@
items=items)
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -896,13 +895,12 @@
while True:
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
if idx == expected_length:
- raise OperationError(self.w_ValueError,
- self.wrap("too many values to unpack"))
+ raise oefmt(self.w_ValueError, "too many values to unpack")
items[idx] = w_item
idx += 1
if idx < expected_length:
@@ -942,7 +940,7 @@
"""
try:
return self.len_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -952,7 +950,7 @@
return default
try:
w_hint = self.get_and_call_function(w_descr, w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -962,8 +960,8 @@
hint = self.int_w(w_hint)
if hint < 0:
- raise OperationError(self.w_ValueError, self.wrap(
- "__length_hint__() should return >= 0"))
+ raise oefmt(self.w_ValueError,
+ "__length_hint__() should return >= 0")
return hint
def fixedview(self, w_iterable, expected_length=-1):
@@ -1049,7 +1047,7 @@
else:
return False
return self.exception_issubclass_w(w_exc_type, w_check_class)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_TypeError): # string exceptions maybe
return False
raise
@@ -1167,7 +1165,7 @@
try:
self.getattr(w_obj, self.wrap("__call__"))
return self.w_True
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_AttributeError):
raise
return self.w_False
@@ -1287,7 +1285,7 @@
def _next_or_none(self, w_it):
try:
return self.next(w_it)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
return None
@@ -1330,8 +1328,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
return start, stop, step
@@ -1351,8 +1348,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
length = 1
@@ -1365,7 +1361,7 @@
"""
try:
w_index = self.index(w_obj)
- except OperationError, err:
+ except OperationError as err:
if objdescr is None or not err.match(self, self.w_TypeError):
raise
raise oefmt(self.w_TypeError, "%s must be an integer, not %T",
@@ -1375,7 +1371,7 @@
# return type of __index__ is already checked by space.index(),
# but there is no reason to allow conversions anyway
index = self.int_w(w_index, allow_conversion=False)
- except OperationError, err:
+ except OperationError as err:
if not err.match(self, self.w_OverflowError):
raise
if not w_exception:
@@ -1396,20 +1392,17 @@
try:
return bigint.tolonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
def r_ulonglong_w(self, w_obj, allow_conversion=True):
bigint = self.bigint_w(w_obj, allow_conversion)
try:
return bigint.toulonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
except ValueError:
- raise OperationError(self.w_ValueError,
- self.wrap('cannot convert negative integer '
- 'to unsigned int'))
+ raise oefmt(self.w_ValueError,
+ "cannot convert negative integer to unsigned int")
BUF_SIMPLE = 0x0000
BUF_WRITABLE = 0x0001
@@ -1526,7 +1519,7 @@
# the unicode buffer.)
try:
return self.str_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_TypeError):
raise
try:
@@ -1555,8 +1548,8 @@
from rpython.rlib import rstring
result = w_obj.str_w(self)
if '\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a string without NUL characters")
return rstring.assert_str0(result)
def int_w(self, w_obj, allow_conversion=True):
@@ -1596,8 +1589,7 @@
def realstr_w(self, w_obj):
# Like str_w, but only works if w_obj is really of type 'str'.
if not self.isinstance_w(w_obj, self.w_str):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a string'))
+ raise oefmt(self.w_TypeError, "argument must be a string")
return self.str_w(w_obj)
def unicode_w(self, w_obj):
@@ -1608,16 +1600,16 @@
from rpython.rlib import rstring
result = w_obj.unicode_w(self)
if u'\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a unicode string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a unicode string without NUL "
+ "characters")
return rstring.assert_str0(result)
def realunicode_w(self, w_obj):
# Like unicode_w, but only works if w_obj is really of type
# 'unicode'.
if not self.isinstance_w(w_obj, self.w_unicode):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a unicode'))
+ raise oefmt(self.w_TypeError, "argument must be a unicode")
return self.unicode_w(w_obj)
def bool_w(self, w_obj):
@@ -1636,8 +1628,8 @@
def gateway_r_uint_w(self, w_obj):
if self.isinstance_w(w_obj, self.w_float):
- raise OperationError(self.w_TypeError,
- self.wrap("integer argument expected, got float"))
+ raise oefmt(self.w_TypeError,
+ "integer argument expected, got float")
return self.uint_w(self.int(w_obj))
def gateway_nonnegint_w(self, w_obj):
@@ -1645,8 +1637,7 @@
# the integer is negative. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
return value
def c_int_w(self, w_obj):
@@ -1654,8 +1645,7 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < INT_MIN or value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_uint_w(self, w_obj):
@@ -1663,8 +1653,8 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.uint_w(w_obj)
if value > UINT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected an unsigned 32-bit integer"))
+ raise oefmt(self.w_OverflowError,
+ "expected an unsigned 32-bit integer")
return value
def c_nonnegint_w(self, w_obj):
@@ -1673,11 +1663,9 @@
# for gateway.py.
value = self.int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
if value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_short_w(self, w_obj):
@@ -1705,7 +1693,7 @@
# instead of raising OverflowError. For obscure cases only.
try:
return self.int_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import intmask
@@ -1716,7 +1704,7 @@
# instead of raising OverflowError.
try:
return self.r_longlong_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import longlongmask
@@ -1731,22 +1719,20 @@
not self.isinstance_w(w_fd, self.w_long)):
try:
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_AttributeError):
- raise OperationError(self.w_TypeError,
- self.wrap("argument must be an int, or have a fileno() "
- "method.")
- )
+ raise oefmt(self.w_TypeError,
+ "argument must be an int, or have a fileno() "
+ "method.")
raise
w_fd = self.call_function(w_fileno)
if (not self.isinstance_w(w_fd, self.w_int) and
not self.isinstance_w(w_fd, self.w_long)):
- raise OperationError(self.w_TypeError,
- self.wrap("fileno() returned a non-integer")
- )
+ raise oefmt(self.w_TypeError,
+ "fileno() returned a non-integer")
try:
fd = self.c_int_w(w_fd)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_OverflowError):
fd = -1
else:
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -214,9 +214,8 @@
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("instance exception may not "
- "have a separate value"))
+ raise oefmt(space.w_TypeError,
+ "instance exception may not have a separate value")
w_value = w_inst
w_type = w_instclass
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -563,7 +563,7 @@
while pending is not None:
try:
pending.callback(pending.w_obj)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(space, pending.descrname, pending.w_obj)
e.clear(space) # break up reference cycles
pending = pending.next
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -202,16 +202,15 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting function's dictionary to a non-dict")
- )
+ raise oefmt(space.w_TypeError,
+ "setting function's dictionary to a non-dict")
self.w_func_dict = w_dict
def descr_function__new__(space, w_subtype, w_code, w_globals,
w_name=None, w_argdefs=None, w_closure=None):
code = space.interp_w(Code, w_code)
if not space.isinstance_w(w_globals, space.w_dict):
- raise OperationError(space.w_TypeError, space.wrap("expected dict"))
+ raise oefmt(space.w_TypeError, "expected dict")
if not space.is_none(w_name):
name = space.str_w(w_name)
else:
@@ -227,15 +226,15 @@
if space.is_none(w_closure) and nfreevars == 0:
closure = None
elif not space.is_w(space.type(w_closure), space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("invalid closure"))
+ raise oefmt(space.w_TypeError, "invalid closure")
else:
from pypy.interpreter.nestedscope import Cell
closure_w = space.unpackiterable(w_closure)
n = len(closure_w)
if nfreevars == 0:
- raise OperationError(space.w_ValueError, space.wrap("no closure needed"))
+ raise oefmt(space.w_ValueError, "no closure needed")
elif nfreevars != n:
- raise OperationError(space.w_ValueError, space.wrap("closure is wrong size"))
+ raise oefmt(space.w_ValueError, "closure is wrong size")
closure = [space.interp_w(Cell, w_cell) for w_cell in closure_w]
func = space.allocate_instance(Function, w_subtype)
Function.__init__(func, space, code, w_globals, defs_w, closure, name)
@@ -321,8 +320,8 @@
w_func_dict, w_module) = args_w
except ValueError:
# wrong args
- raise OperationError(space.w_ValueError,
- space.wrap("Wrong arguments to function.__setstate__"))
+ raise oefmt(space.w_ValueError,
+ "Wrong arguments to function.__setstate__")
self.space = space
self.name = space.str_w(w_name)
@@ -359,7 +358,8 @@
self.defs_w = []
return
if not space.isinstance_w(w_defaults, space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("func_defaults must be set to a tuple object or None"))
+ raise oefmt(space.w_TypeError,
+ "func_defaults must be set to a tuple object or None")
self.defs_w = space.fixedview(w_defaults)
def fdel_func_defaults(self, space):
@@ -380,8 +380,8 @@
if space.isinstance_w(w_name, space.w_str):
self.name = space.str_w(w_name)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("__name__ must be set to a string object"))
+ raise oefmt(space.w_TypeError,
+ "__name__ must be set to a string object")
def fdel_func_doc(self, space):
self.w_doc = space.w_None
@@ -406,8 +406,8 @@
def fset_func_code(self, space, w_code):
from pypy.interpreter.pycode import PyCode
if not self.can_change_code:
- raise OperationError(space.w_TypeError,
- space.wrap("Cannot change code attribute of builtin functions"))
+ raise oefmt(space.w_TypeError,
+ "Cannot change code attribute of builtin functions")
code = space.interp_w(Code, w_code)
closure_len = 0
if self.closure:
@@ -457,8 +457,7 @@
if space.is_w(w_instance, space.w_None):
w_instance = None
if w_instance is None and space.is_none(w_class):
- raise OperationError(space.w_TypeError,
- space.wrap("unbound methods must have class"))
+ raise oefmt(space.w_TypeError, "unbound methods must have class")
method = space.allocate_instance(Method, w_subtype)
Method.__init__(method, space, w_function, w_instance, w_class)
return space.wrap(method)
@@ -540,7 +539,7 @@
try:
return space.call_method(space.w_object, '__getattribute__',
space.wrap(self), w_attr)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# fall-back to the attribute of the underlying 'im_func'
@@ -659,8 +658,8 @@
self.w_module = func.w_module
def descr_builtinfunction__new__(space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("cannot create 'builtin_function' instances"))
+ raise oefmt(space.w_TypeError,
+ "cannot create 'builtin_function' instances")
def descr_function_repr(self):
return self.space.wrap('' % (self.name,))
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -21,7 +21,7 @@
from pypy.interpreter.signature import Signature
from pypy.interpreter.baseobjspace import (W_Root, ObjSpace, SpaceCache,
DescrMismatch)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import ClassMethod, FunctionWithFixedCode
from rpython.rlib import rstackovf
from rpython.rlib.objectmodel import we_are_translated
@@ -686,7 +686,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -699,14 +699,13 @@
raise
raise e
except KeyboardInterrupt:
- raise OperationError(space.w_KeyboardInterrupt,
- space.w_None)
+ raise OperationError(space.w_KeyboardInterrupt, space.w_None)
except MemoryError:
raise OperationError(space.w_MemoryError, space.w_None)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
rstackovf.check_stack_overflow()
- raise OperationError(space.w_RuntimeError,
- space.wrap("maximum recursion depth exceeded"))
+ raise oefmt(space.w_RuntimeError,
+ "maximum recursion depth exceeded")
except RuntimeError: # not on top of py.py
raise OperationError(space.w_RuntimeError, space.w_None)
@@ -725,7 +724,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -746,7 +745,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args.prepend(w_obj))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -762,9 +761,8 @@
try:
w_result = self.fastfunc_0(space)
except DescrMismatch:
- raise OperationError(space.w_SystemError,
- space.wrap("unexpected DescrMismatch error"))
- except Exception, e:
+ raise oefmt(space.w_SystemError, "unexpected DescrMismatch error")
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -784,7 +782,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -804,7 +802,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -824,7 +822,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2, w3]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -845,7 +843,7 @@
self.descr_reqcls,
Arguments(space,
[w1, w2, w3, w4]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,5 +1,5 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.pyopcode import LoopBlock
from rpython.rlib import jit
@@ -76,8 +76,7 @@
def _send_ex(self, w_arg, operr):
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None:
# xxx a bit ad-hoc, but we don't want to go inside
@@ -89,8 +88,9 @@
last_instr = jit.promote(frame.last_instr)
if last_instr == -1:
if w_arg and not space.is_w(w_arg, space.w_None):
- msg = "can't send non-None value to a just-started generator"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "can't send non-None value to a just-started "
+ "generator")
else:
if not w_arg:
w_arg = space.w_None
@@ -144,15 +144,15 @@
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
space.w_None)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration) or \
e.match(space, space.w_GeneratorExit):
return space.w_None
raise
if w_retval is not None:
- msg = "generator ignored GeneratorExit"
- raise OperationError(space.w_RuntimeError, space.wrap(msg))
+ raise oefmt(space.w_RuntimeError,
+ "generator ignored GeneratorExit")
def descr_gi_frame(self, space):
if self.frame is not None and not self.frame.frame_finished_execution:
@@ -184,8 +184,7 @@
# XXX copied and simplified version of send_ex()
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None: # already finished
return
@@ -197,7 +196,7 @@
results=results, pycode=pycode)
try:
w_result = frame.execute_frame(space.w_None)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py
--- a/pypy/interpreter/main.py
+++ b/pypy/interpreter/main.py
@@ -8,7 +8,7 @@
w_modules = space.sys.get('modules')
try:
return space.getitem(w_modules, w_main)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
mainmodule = module.Module(space, w_main)
@@ -52,7 +52,7 @@
else:
return
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.record_interpreter_traceback()
raise
@@ -110,7 +110,7 @@
try:
w_stdout = space.sys.get('stdout')
w_softspace = space.getattr(w_stdout, space.wrap('softspace'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# Don't crash if user defined stdout doesn't have softspace
@@ -118,7 +118,7 @@
if space.is_true(w_softspace):
space.call_method(w_stdout, 'write', space.wrap('\n'))
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.normalize_exception(space)
w_type = operationerr.w_type
w_value = operationerr.get_w_value(space)
@@ -162,7 +162,7 @@
space.call_function(w_hook, w_type, w_value, w_traceback)
return False # done
- except OperationError, err2:
+ except OperationError as err2:
# XXX should we go through sys.get('stderr') ?
print >> sys.stderr, 'Error calling sys.excepthook:'
err2.print_application_traceback(space)
diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py
--- a/pypy/interpreter/mixedmodule.py
+++ b/pypy/interpreter/mixedmodule.py
@@ -169,7 +169,7 @@
while 1:
try:
value = eval(spec, d)
- except NameError, ex:
+ except NameError as ex:
name = ex.args[0].split("'")[1] # super-Evil
if name in d:
raise # propagate the NameError
diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py
--- a/pypy/interpreter/nestedscope.py
+++ b/pypy/interpreter/nestedscope.py
@@ -1,7 +1,7 @@
from rpython.tool.uid import uid
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.mixedmodule import MixedModule
@@ -78,4 +78,4 @@
try:
return self.get()
except ValueError:
- raise OperationError(space.w_ValueError, space.wrap("Cell is empty"))
+ raise oefmt(space.w_ValueError, "Cell is empty")
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -8,7 +8,7 @@
from pypy.interpreter import eval
from pypy.interpreter.signature import Signature
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.astcompiler.consts import (
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED,
@@ -110,7 +110,7 @@
if code_hook is not None:
try:
self.space.call_function(code_hook, self)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self.space, "new_code_hook()")
def _initialize(self):
@@ -374,14 +374,13 @@
lnotab, w_freevars=None, w_cellvars=None,
magic=default_magic):
if argcount < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: argcount must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: argcount must not be negative")
if nlocals < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: nlocals must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: nlocals must not be negative")
if not space.isinstance_w(w_constants, space.w_tuple):
- raise OperationError(space.w_TypeError,
- space.wrap("Expected tuple for constants"))
+ raise oefmt(space.w_TypeError, "Expected tuple for constants")
consts_w = space.fixedview(w_constants)
names = unpack_str_tuple(space, w_names)
varnames = unpack_str_tuple(space, w_varnames)
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -7,7 +7,7 @@
from pypy.interpreter.pyparser import future, pyparse, error as parseerror
from pypy.interpreter.astcompiler import (astbuilder, codegen, consts, misc,
optimize, ast)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
class AbstractCompiler(object):
@@ -55,21 +55,21 @@
try:
code = self.compile(source, filename, mode, flags)
return code # success
- except OperationError, err:
+ except OperationError as err:
if not err.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n", filename, mode, flags)
return None # expect more
- except OperationError, err1:
+ except OperationError as err1:
if not err1.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n\n", filename, mode, flags)
raise # uh? no error with \n\n. re-raise the previous error
- except OperationError, err2:
+ except OperationError as err2:
if not err2.match(space, space.w_SyntaxError):
raise
@@ -116,8 +116,7 @@
else:
check = True
if not check:
- raise OperationError(self.space.w_TypeError, self.space.wrap(
- "invalid node type"))
+ raise oefmt(self.space.w_TypeError, "invalid node type")
fut = misc.parse_future(node, self.future_flags.compiler_features)
f_flags, f_lineno, f_col = fut
@@ -131,9 +130,8 @@
try:
mod = optimize.optimize_ast(space, node, info)
code = codegen.compile_ast(space, mod, info)
- except parseerror.SyntaxError, e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ except parseerror.SyntaxError as e:
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return code
def compile_to_ast(self, source, filename, mode, flags):
@@ -145,12 +143,10 @@
try:
parse_tree = self.parser.parse_source(source, info)
mod = astbuilder.ast_from_node(space, parse_tree, info)
- except parseerror.IndentationError, e:
- raise OperationError(space.w_IndentationError,
- e.wrap_info(space))
- except parseerror.SyntaxError, e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ except parseerror.IndentationError as e:
+ raise OperationError(space.w_IndentationError, e.wrap_info(space))
+ except parseerror.SyntaxError as e:
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return mod
def compile(self, source, filename, mode, flags, hidden_applevel=False):
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -220,9 +220,9 @@
return # no cells needed - fast path
elif outer_func is None:
space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("directly executed code object "
- "may not contain free variables"))
+ raise oefmt(space.w_TypeError,
+ "directly executed code object may not contain free "
+ "variables")
if outer_func and outer_func.closure:
closure_size = len(outer_func.closure)
else:
@@ -513,7 +513,7 @@
self.locals_cells_stack_w = values_w[:]
valuestackdepth = space.int_w(w_stackdepth)
if not self._check_stack_index(valuestackdepth):
- raise OperationError(space.w_ValueError, space.wrap("invalid stackdepth"))
+ raise oefmt(space.w_ValueError, "invalid stackdepth")
assert valuestackdepth >= 0
self.valuestackdepth = valuestackdepth
if space.is_w(w_exc_value, space.w_None):
@@ -550,7 +550,7 @@
where the order is according to self.pycode.signature()."""
scope_len = len(scope_w)
if scope_len > self.pycode.co_nlocals:
- raise ValueError, "new fastscope is longer than the allocated area"
+ raise ValueError("new fastscope is longer than the allocated area")
# don't assign directly to 'locals_cells_stack_w[:scope_len]' to be
# virtualizable-friendly
for i in range(scope_len):
@@ -686,12 +686,11 @@
try:
new_lineno = space.int_w(w_new_lineno)
except OperationError:
- raise OperationError(space.w_ValueError,
- space.wrap("lineno must be an integer"))
+ raise oefmt(space.w_ValueError, "lineno must be an integer")
if self.get_w_f_trace() is None:
- raise OperationError(space.w_ValueError,
- space.wrap("f_lineno can only be set by a trace function."))
+ raise oefmt(space.w_ValueError,
+ "f_lineno can only be set by a trace function.")
line = self.pycode.co_firstlineno
if new_lineno < line:
@@ -718,8 +717,8 @@
# Don't jump to a line with an except in it.
code = self.pycode.co_code
if ord(code[new_lasti]) in (DUP_TOP, POP_TOP):
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump to 'except' line as there's no exception"))
+ raise oefmt(space.w_ValueError,
+ "can't jump to 'except' line as there's no exception")
# Don't jump into or out of a finally block.
f_lasti_setup_addr = -1
@@ -800,8 +799,8 @@
new_iblock = f_iblock - delta_iblock
if new_iblock > min_iblock:
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump into the middle of a block"))
+ raise oefmt(space.w_ValueError,
+ "can't jump into the middle of a block")
while f_iblock > new_iblock:
block = self.pop_block()
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -67,9 +67,9 @@
def handle_bytecode(self, co_code, next_instr, ec):
try:
next_instr = self.dispatch_bytecode(co_code, next_instr, ec)
- except OperationError, operr:
+ except OperationError as operr:
next_instr = self.handle_operation_error(ec, operr)
- except RaiseWithExplicitTraceback, e:
+ except RaiseWithExplicitTraceback as e:
next_instr = self.handle_operation_error(ec, e.operr,
attach_tb=False)
except KeyboardInterrupt:
@@ -78,7 +78,7 @@
except MemoryError:
next_instr = self.handle_asynchronous_error(ec,
self.space.w_MemoryError)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
# Note that this case catches AttributeError!
rstackovf.check_stack_overflow()
next_instr = self.handle_asynchronous_error(ec,
@@ -117,7 +117,7 @@
finally:
if trace is not None:
self.getorcreatedebug().w_f_trace = trace
- except OperationError, e:
+ except OperationError as e:
operr = e
pytraceback.record_application_traceback(
self.space, operr, self, self.last_instr)
@@ -844,7 +844,7 @@
w_varname = self.getname_w(varindex)
try:
self.space.delitem(self.getorcreatedebug().w_locals, w_varname)
- except OperationError, e:
+ except OperationError as e:
# catch KeyErrors and turn them into NameErrors
if not e.match(self.space, self.space.w_KeyError):
raise
@@ -1003,7 +1003,7 @@
try:
if space.int_w(w_flag) == -1:
w_flag = None
- except OperationError, e:
+ except OperationError as e:
if e.async(space):
raise
@@ -1040,7 +1040,7 @@
w_module = self.peekvalue()
try:
w_obj = self.space.getattr(w_module, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_AttributeError):
raise
raise oefmt(self.space.w_ImportError,
@@ -1099,7 +1099,7 @@
w_iterator = self.peekvalue()
try:
w_nextitem = self.space.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_StopIteration):
raise
# iterator exhausted
@@ -1110,7 +1110,7 @@
return next_instr
def FOR_LOOP(self, oparg, next_instr):
- raise BytecodeCorruption, "old opcode, no longer in use"
+ raise BytecodeCorruption("old opcode, no longer in use")
def SETUP_LOOP(self, offsettoend, next_instr):
block = LoopBlock(self, next_instr + offsettoend, self.lastblock)
diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -118,7 +118,7 @@
if enc is not None and enc not in ('utf-8', 'iso-8859-1'):
try:
textsrc = recode_to_utf8(self.space, textsrc, enc)
- except OperationError, e:
+ except OperationError as e:
# if the codec is not found, LookupError is raised. we
# check using 'is_w' not to mask potential IndexError or
# KeyError
@@ -164,10 +164,10 @@
for tp, value, lineno, column, line in tokens:
if self.add_token(tp, value, lineno, column, line):
break
- except error.TokenError, e:
+ except error.TokenError as e:
e.filename = compile_info.filename
raise
- except parser.ParseError, e:
+ except parser.ParseError as e:
# Catch parse errors, pretty them up and reraise them as a
# SyntaxError.
new_err = error.IndentationError
diff --git a/pypy/interpreter/pyparser/test/unittest_samples.py b/pypy/interpreter/pyparser/test/unittest_samples.py
--- a/pypy/interpreter/pyparser/test/unittest_samples.py
+++ b/pypy/interpreter/pyparser/test/unittest_samples.py
@@ -66,7 +66,7 @@
print
try:
assert_tuples_equal(pypy_tuples, python_tuples)
- except AssertionError,e:
+ except AssertionError as e:
error_path = e.args[-1]
print "ERROR PATH =", error_path
print "="*80
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -224,7 +224,7 @@
def _spawn(self, *args, **kwds):
try:
import pexpect
- except ImportError, e:
+ except ImportError as e:
py.test.skip(str(e))
else:
# Version is of the style "0.999" or "2.1". Older versions of
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -348,7 +348,7 @@
excinfo = py.test.raises(OperationError, Arguments, space, [],
["a"], [1], w_starstararg={None: 1})
assert excinfo.value.w_type is TypeError
- assert excinfo.value._w_value is not None
+ assert excinfo.value._w_value is None
excinfo = py.test.raises(OperationError, Arguments, space, [],
["a"], [1], w_starstararg={valuedummy: 1})
assert excinfo.value.w_type is ValueError
@@ -618,14 +618,14 @@
space = self.space
try:
Arguments(space, [], w_stararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after * must be a sequence, not int"
else:
assert 0, "did not raise"
try:
Arguments(space, [], w_starstararg=space.wrap(42))
- except OperationError, e:
+ except OperationError as e:
msg = space.str_w(space.str(e.get_w_value(space)))
assert msg == "argument after ** must be a mapping, not int"
else:
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -696,7 +696,7 @@
""")
try:
self.compiler.compile(str(source), '', 'exec', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -706,7 +706,7 @@
code = 'def f(): (yield bar) += y'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -716,7 +716,7 @@
code = 'dict(a = i for i in xrange(10))'
try:
self.compiler.compile(code, '', 'single', 0)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self.space, self.space.w_SyntaxError):
raise
else:
@@ -1011,7 +1011,7 @@
"""
try:
exec source
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unindent does not match any outer indentation level'
else:
raise Exception("DID NOT RAISE")
@@ -1021,13 +1021,13 @@
source2 = "x = (\n\n"
try:
exec source1
- except SyntaxError, err1:
+ except SyntaxError as err1:
pass
else:
raise Exception("DID NOT RAISE")
try:
exec source2
- except SyntaxError, err2:
+ except SyntaxError as err2:
pass
else:
raise Exception("DID NOT RAISE")
diff --git a/pypy/interpreter/test/test_exceptcomp.py b/pypy/interpreter/test/test_exceptcomp.py
--- a/pypy/interpreter/test/test_exceptcomp.py
+++ b/pypy/interpreter/test/test_exceptcomp.py
@@ -7,7 +7,7 @@
def test_exception(self):
try:
- raise TypeError, "nothing"
+ raise TypeError("nothing")
except TypeError:
pass
except:
@@ -15,7 +15,7 @@
def test_exceptionfail(self):
try:
- raise TypeError, "nothing"
+ raise TypeError("nothing")
except KeyError:
self.fail("Different exceptions match.")
except TypeError:
@@ -47,7 +47,7 @@
class UserExcept(Exception):
pass
try:
- raise UserExcept, "nothing"
+ raise UserExcept("nothing")
except UserExcept:
pass
except:
diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
--- a/pypy/interpreter/test/test_exec.py
+++ b/pypy/interpreter/test/test_exec.py
@@ -196,11 +196,11 @@
def test_filename(self):
try:
exec "'unmatched_quote"
- except SyntaxError, msg:
+ except SyntaxError as msg:
assert msg.filename == ''
try:
eval("'unmatched_quote")
- except SyntaxError, msg:
+ except SyntaxError as msg:
assert msg.filename == ''
def test_exec_and_name_lookups(self):
@@ -213,7 +213,7 @@
try:
res = f()
- except NameError, e: # keep py.test from exploding confused
+ except NameError as e: # keep py.test from exploding confused
raise e
assert res == 1
diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py
--- a/pypy/interpreter/test/test_function.py
+++ b/pypy/interpreter/test/test_function.py
@@ -296,14 +296,14 @@
def test_call_error_message(self):
try:
len()
- except TypeError, e:
+ except TypeError as e:
assert "len() takes exactly 1 argument (0 given)" in e.message
else:
assert 0, "did not raise"
try:
len(1, 2)
- except TypeError, e:
+ except TypeError as e:
assert "len() takes exactly 1 argument (2 given)" in e.message
else:
assert 0, "did not raise"
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -26,7 +26,7 @@
wrappedfunc = space.getitem(w_glob, w(functionname))
try:
w_output = space.call_function(wrappedfunc, *wrappedargs)
- except error.OperationError, e:
+ except error.OperationError as e:
#e.print_detailed_traceback(space)
return '<<<%s>>>' % e.errorstr(space)
else:
@@ -331,7 +331,7 @@
def f(): f()
try:
f()
- except RuntimeError, e:
+ except RuntimeError as e:
assert str(e) == "maximum recursion depth exceeded"
else:
assert 0, "should have raised!"
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -86,7 +86,7 @@
""")
try:
space.unpackiterable(w_a)
- except OperationError, o:
+ except OperationError as o:
if not o.match(space, space.w_ZeroDivisionError):
raise Exception("DID NOT RAISE")
else:
@@ -237,7 +237,7 @@
self.space.getindex_w, w_instance2, self.space.w_IndexError)
try:
self.space.getindex_w(self.space.w_tuple, None, "foobar")
- except OperationError, e:
+ except OperationError as e:
assert e.match(self.space, self.space.w_TypeError)
assert "foobar" in e.errorstr(self.space)
else:
diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py
--- a/pypy/interpreter/test/test_pyframe.py
+++ b/pypy/interpreter/test/test_pyframe.py
@@ -376,7 +376,7 @@
def g():
try:
raise Exception
- except Exception, e:
+ except Exception as e:
import sys
raise Exception, e, sys.exc_info()[2]
diff --git a/pypy/interpreter/test/test_raise.py b/pypy/interpreter/test/test_raise.py
--- a/pypy/interpreter/test/test_raise.py
+++ b/pypy/interpreter/test/test_raise.py
@@ -18,34 +18,34 @@
def test_1arg(self):
try:
raise SystemError, 1
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
def test_2args(self):
try:
raise SystemError, (1, 2)
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_instancearg(self):
try:
raise SystemError, SystemError(1, 2)
- except Exception, e:
+ except Exception as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_more_precise_instancearg(self):
try:
raise Exception, SystemError(1, 2)
- except SystemError, e:
+ except SystemError as e:
assert e.args[0] == 1
assert e.args[1] == 2
def test_builtin_exc(self):
try:
[][0]
- except IndexError, e:
+ except IndexError as e:
assert isinstance(e, IndexError)
def test_raise_cls(self):
@@ -194,7 +194,7 @@
raise Sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a.__class__ is Sub
sub = Sub()
@@ -202,14 +202,14 @@
raise sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a is sub
try:
raise A, sub
except IndexError:
assert 0
- except A, a:
+ except A as a:
assert a is sub
assert sub.val is None
@@ -217,13 +217,13 @@
raise Sub, 42
except IndexError:
assert 0
- except A, a:
+ except A as a:
From pypy.commits at gmail.com Fri May 6 02:26:16 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 05 May 2016 23:26:16 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-werror: close branch to be merged
Message-ID: <572c3908.d81a1c0a.33af1.3644@mx.google.com>
Author: Matti Picus
Branch: cpyext-werror
Changeset: r84241:aed18e5aa86f
Date: 2016-05-06 09:21 +0300
http://bitbucket.org/pypy/pypy/changeset/aed18e5aa86f/
Log: close branch to be merged
From pypy.commits at gmail.com Fri May 6 02:26:19 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 05 May 2016 23:26:19 -0700 (PDT)
Subject: [pypy-commit] pypy default: document merged branches
Message-ID: <572c390b.8a37c20a.4d8f2.4f92@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84243:ae69ed743592
Date: 2016-05-06 09:25 +0300
http://bitbucket.org/pypy/pypy/changeset/ae69ed743592/
Log: document merged branches
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -68,3 +68,12 @@
CPython).
.. branch: oefmt
+
+.. branch: cpyext-werror
+
+Compile c snippets with -Werror in cpyext
+
+.. branch: gc-del-3
+
+Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-order.rst.
+It is a more flexible way to make RPython finalizers.
From pypy.commits at gmail.com Fri May 6 02:26:18 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 05 May 2016 23:26:18 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge cpyext-werror which runs cpyext
tests with -Werror on linux
Message-ID: <572c390a.45271c0a.d8568.36d4@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84242:3a21ee5bfa7f
Date: 2016-05-06 09:22 +0300
http://bitbucket.org/pypy/pypy/changeset/3a21ee5bfa7f/
Log: merge cpyext-werror which runs cpyext tests with -Werror on linux
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -248,7 +248,7 @@
w_signature = rffi.charp2str(signature)
return do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc,
check_return, w_signature)
-
+
def do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc,
check_return, w_signature):
diff --git a/pypy/module/cpyext/test/test_borrow.py b/pypy/module/cpyext/test/test_borrow.py
--- a/pypy/module/cpyext/test/test_borrow.py
+++ b/pypy/module/cpyext/test/test_borrow.py
@@ -12,13 +12,13 @@
PyObject *t = PyTuple_New(1);
PyObject *f = PyFloat_FromDouble(42.0);
PyObject *g = NULL;
- printf("Refcnt1: %i\\n", f->ob_refcnt);
+ printf("Refcnt1: %zd\\n", f->ob_refcnt);
PyTuple_SetItem(t, 0, f); // steals reference
- printf("Refcnt2: %i\\n", f->ob_refcnt);
+ printf("Refcnt2: %zd\\n", f->ob_refcnt);
f = PyTuple_GetItem(t, 0); // borrows reference
- printf("Refcnt3: %i\\n", f->ob_refcnt);
+ printf("Refcnt3: %zd\\n", f->ob_refcnt);
g = PyTuple_GetItem(t, 0); // borrows reference again
- printf("Refcnt4: %i\\n", f->ob_refcnt);
+ printf("Refcnt4: %zd\\n", f->ob_refcnt);
printf("COMPARE: %i\\n", f == g);
fflush(stdout);
Py_DECREF(t);
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -162,7 +162,10 @@
module = self.import_extension('foo', [
("string_None", "METH_VARARGS",
'''
- return PyString_AsString(Py_None);
+ if (PyString_AsString(Py_None)) {
+ Py_RETURN_NONE;
+ }
+ return NULL;
'''
)])
raises(TypeError, module.string_None)
diff --git a/pypy/module/cpyext/test/test_classobject.py b/pypy/module/cpyext/test/test_classobject.py
--- a/pypy/module/cpyext/test/test_classobject.py
+++ b/pypy/module/cpyext/test/test_classobject.py
@@ -29,7 +29,6 @@
assert space.unwrap(space.getattr(w_instance, space.wrap('x'))) == 1
assert space.unwrap(space.getattr(w_instance, space.wrap('y'))) == 2
assert space.unwrap(space.getattr(w_instance, space.wrap('args'))) == (3,)
-
def test_lookup(self, space, api):
w_instance = space.appexec([], """():
@@ -68,7 +67,7 @@
("get_classtype", "METH_NOARGS",
"""
Py_INCREF(&PyClass_Type);
- return &PyClass_Type;
+ return (PyObject*)&PyClass_Type;
""")])
class C: pass
assert module.get_classtype() is type(C)
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -72,8 +72,7 @@
else:
kwds["link_files"] = [str(api_library + '.so')]
if sys.platform.startswith('linux'):
- kwds["compile_extra"]=["-Werror=implicit-function-declaration",
- "-g", "-O0"]
+ kwds["compile_extra"]=["-Werror", "-g", "-O0"]
kwds["link_extra"]=["-g"]
modname = modname.split('.')[-1]
@@ -747,7 +746,7 @@
refcnt_after = true_obj->ob_refcnt;
Py_DECREF(true_obj);
Py_DECREF(true_obj);
- fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after);
+ fprintf(stderr, "REFCNT %zd %zd\\n", refcnt, refcnt_after);
return PyBool_FromLong(refcnt_after == refcnt + 2);
}
static PyObject* foo_bar(PyObject* self, PyObject *args)
@@ -763,7 +762,7 @@
return NULL;
refcnt_after = true_obj->ob_refcnt;
Py_DECREF(tup);
- fprintf(stderr, "REFCNT2 %i %i %i\\n", refcnt, refcnt_after,
+ fprintf(stderr, "REFCNT2 %zd %zd %zd\\n", refcnt, refcnt_after,
true_obj->ob_refcnt);
return PyBool_FromLong(refcnt_after == refcnt + 1 &&
refcnt == true_obj->ob_refcnt);
diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py
--- a/pypy/module/cpyext/test/test_longobject.py
+++ b/pypy/module/cpyext/test/test_longobject.py
@@ -171,7 +171,7 @@
int little_endian, is_signed;
if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed))
return NULL;
- return _PyLong_FromByteArray("\x9A\xBC", 2,
+ return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC", 2,
little_endian, is_signed);
"""),
])
@@ -187,7 +187,7 @@
int little_endian, is_signed;
if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed))
return NULL;
- return _PyLong_FromByteArray("\x9A\xBC\x41", 3,
+ return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC\x41", 3,
little_endian, is_signed);
"""),
])
diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
--- a/pypy/module/cpyext/test/test_pyerrors.py
+++ b/pypy/module/cpyext/test/test_pyerrors.py
@@ -168,14 +168,14 @@
PyErr_NormalizeException(&type, &val, &tb);
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
- if (val->ob_type != PyExc_TypeError)
+ if ((PyObject*)Py_TYPE(val) != PyExc_TypeError)
Py_RETURN_FALSE;
/* Normalize again */
PyErr_NormalizeException(&type, &val, &tb);
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
- if (val->ob_type != PyExc_TypeError)
+ if ((PyObject*)Py_TYPE(val) != PyExc_TypeError)
Py_RETURN_FALSE;
PyErr_Restore(type, val, tb);
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -437,14 +437,14 @@
("test_tp_getattro", "METH_VARARGS",
'''
PyObject *name, *obj = PyTuple_GET_ITEM(args, 0);
- PyIntObject *attr, *value = PyTuple_GET_ITEM(args, 1);
+ PyIntObject *attr, *value = (PyIntObject*) PyTuple_GET_ITEM(args, 1);
if (!obj->ob_type->tp_getattro)
{
PyErr_SetString(PyExc_ValueError, "missing tp_getattro");
return NULL;
}
name = PyString_FromString("attr1");
- attr = obj->ob_type->tp_getattro(obj, name);
+ attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name);
if (attr->ob_ival != value->ob_ival)
{
PyErr_SetString(PyExc_ValueError,
@@ -454,7 +454,7 @@
Py_DECREF(name);
Py_DECREF(attr);
name = PyString_FromString("attr2");
- attr = obj->ob_type->tp_getattro(obj, name);
+ attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name);
if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError))
{
PyErr_Clear();
@@ -758,8 +758,9 @@
} IntLikeObject;
static int
- intlike_nb_nonzero(IntLikeObject *v)
+ intlike_nb_nonzero(PyObject *o)
{
+ IntLikeObject *v = (IntLikeObject*)o;
if (v->value == -42) {
PyErr_SetNone(PyExc_ValueError);
return -1;
From pypy.commits at gmail.com Fri May 6 02:49:31 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 23:49:31 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Careful
Message-ID: <572c3e7b.45271c0a.d8568.3ead@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84244:b057111d38cf
Date: 2016-05-06 08:01 +0200
http://bitbucket.org/pypy/pypy/changeset/b057111d38cf/
Log: Careful
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -156,7 +156,9 @@
By default, it is *not called*. See self.register_finalizer().
Be ready to handle the case where the object is only half
- initialized.
+ initialized. Also, in some cases the object might still be
+ visible to app-level after _finalize_() is called (e.g. if
+ there is a __del__ that resurrects).
"""
def register_finalizer(self, space):
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -320,10 +320,14 @@
return self
def _finalize_(self):
- if self.peer_cert:
- libssl_X509_free(self.peer_cert)
- if self.ssl:
- libssl_SSL_free(self.ssl)
+ peer_cert = self.peer_cert
+ if peer_cert:
+ self.peer_cert = lltype.nullptr(X509.TO)
+ libssl_X509_free(peer_cert)
+ ssl = self.ssl
+ if ssl:
+ self.ssl = lltype.nullptr(SSL.TO)
+ libssl_SSL_free(ssl)
@unwrap_spec(data='bufferstr')
def write(self, space, data):
@@ -1307,7 +1311,10 @@
return self
def _finalize_(self):
- libssl_SSL_CTX_free(self.ctx)
+ ctx = self.ctx
+ if ctx:
+ self.ctx = lltype.nullptr(SSL_CTX.TO)
+ libssl_SSL_CTX_free(ctx)
@unwrap_spec(server_side=int)
def descr_wrap_socket(self, space, w_sock, server_side, w_server_hostname=None, w_ssl_sock=None):
From pypy.commits at gmail.com Fri May 6 02:49:33 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 23:49:33 -0700 (PDT)
Subject: [pypy-commit] pypy default: Tweak: can now run "graphserver.py" or
"sshgraphserver.py LOCAL", and it
Message-ID: <572c3e7d.0e711c0a.a9c4f.3cf4@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84245:5c988098b449
Date: 2016-05-06 08:42 +0200
http://bitbucket.org/pypy/pypy/changeset/5c988098b449/
Log: Tweak: can now run "graphserver.py" or "sshgraphserver.py LOCAL",
and it doesn't use any ssh connection.
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
if __name__ == '__main__':
if len(sys.argv) != 2:
+ if len(sys.argv) == 1:
+ # start locally
+ import sshgraphserver
+ sshgraphserver.ssh_graph_server(['LOCAL'])
+ sys.exit(0)
print >> sys.stderr, __doc__
sys.exit(2)
if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
Usage:
sshgraphserver.py hostname [more args for ssh...]
+ sshgraphserver.py LOCAL
This logs in to 'hostname' by passing the arguments on the command-line
to ssh. No further configuration is required: it works for all programs
using the dotviewer library as long as they run on 'hostname' under the
same username as the one sshgraphserver logs as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
"""
import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
s1 = socket.socket()
s1.bind(('127.0.0.1', socket.INADDR_ANY))
localhost, localport = s1.getsockname()
- remoteport = random.randrange(10000, 20000)
- # ^^^ and just hope there is no conflict
- args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
- args = args + sshargs + ['python -u -c "exec input()"']
- print ' '.join(args[:-1])
+ if sshargs[0] != 'LOCAL':
+ remoteport = random.randrange(10000, 20000)
+ # ^^^ and just hope there is no conflict
+
+ args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+ remoteport, localport)]
+ args = args + sshargs + ['python -u -c "exec input()"']
+ else:
+ remoteport = localport
+ args = ['python', '-u', '-c', 'exec input()']
+
+ print ' '.join(args)
p = subprocess.Popen(args, bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
From pypy.commits at gmail.com Fri May 6 02:49:35 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 05 May 2016 23:49:35 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge heads
Message-ID: <572c3e7f.0b1f1c0a.fc792.3a80@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84246:7d2a931a40e7
Date: 2016-05-06 08:49 +0200
http://bitbucket.org/pypy/pypy/changeset/7d2a931a40e7/
Log: merge heads
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -68,3 +68,12 @@
CPython).
.. branch: oefmt
+
+.. branch: cpyext-werror
+
+Compile c snippets with -Werror in cpyext
+
+.. branch: gc-del-3
+
+Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-order.rst.
+It is a more flexible way to make RPython finalizers.
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -248,7 +248,7 @@
w_signature = rffi.charp2str(signature)
return do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc,
check_return, w_signature)
-
+
def do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc,
check_return, w_signature):
diff --git a/pypy/module/cpyext/test/test_borrow.py b/pypy/module/cpyext/test/test_borrow.py
--- a/pypy/module/cpyext/test/test_borrow.py
+++ b/pypy/module/cpyext/test/test_borrow.py
@@ -12,13 +12,13 @@
PyObject *t = PyTuple_New(1);
PyObject *f = PyFloat_FromDouble(42.0);
PyObject *g = NULL;
- printf("Refcnt1: %i\\n", f->ob_refcnt);
+ printf("Refcnt1: %zd\\n", f->ob_refcnt);
PyTuple_SetItem(t, 0, f); // steals reference
- printf("Refcnt2: %i\\n", f->ob_refcnt);
+ printf("Refcnt2: %zd\\n", f->ob_refcnt);
f = PyTuple_GetItem(t, 0); // borrows reference
- printf("Refcnt3: %i\\n", f->ob_refcnt);
+ printf("Refcnt3: %zd\\n", f->ob_refcnt);
g = PyTuple_GetItem(t, 0); // borrows reference again
- printf("Refcnt4: %i\\n", f->ob_refcnt);
+ printf("Refcnt4: %zd\\n", f->ob_refcnt);
printf("COMPARE: %i\\n", f == g);
fflush(stdout);
Py_DECREF(t);
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -162,7 +162,10 @@
module = self.import_extension('foo', [
("string_None", "METH_VARARGS",
'''
- return PyString_AsString(Py_None);
+ if (PyString_AsString(Py_None)) {
+ Py_RETURN_NONE;
+ }
+ return NULL;
'''
)])
raises(TypeError, module.string_None)
diff --git a/pypy/module/cpyext/test/test_classobject.py b/pypy/module/cpyext/test/test_classobject.py
--- a/pypy/module/cpyext/test/test_classobject.py
+++ b/pypy/module/cpyext/test/test_classobject.py
@@ -29,7 +29,6 @@
assert space.unwrap(space.getattr(w_instance, space.wrap('x'))) == 1
assert space.unwrap(space.getattr(w_instance, space.wrap('y'))) == 2
assert space.unwrap(space.getattr(w_instance, space.wrap('args'))) == (3,)
-
def test_lookup(self, space, api):
w_instance = space.appexec([], """():
@@ -68,7 +67,7 @@
("get_classtype", "METH_NOARGS",
"""
Py_INCREF(&PyClass_Type);
- return &PyClass_Type;
+ return (PyObject*)&PyClass_Type;
""")])
class C: pass
assert module.get_classtype() is type(C)
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -72,8 +72,7 @@
else:
kwds["link_files"] = [str(api_library + '.so')]
if sys.platform.startswith('linux'):
- kwds["compile_extra"]=["-Werror=implicit-function-declaration",
- "-g", "-O0"]
+ kwds["compile_extra"]=["-Werror", "-g", "-O0"]
kwds["link_extra"]=["-g"]
modname = modname.split('.')[-1]
@@ -747,7 +746,7 @@
refcnt_after = true_obj->ob_refcnt;
Py_DECREF(true_obj);
Py_DECREF(true_obj);
- fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after);
+ fprintf(stderr, "REFCNT %zd %zd\\n", refcnt, refcnt_after);
return PyBool_FromLong(refcnt_after == refcnt + 2);
}
static PyObject* foo_bar(PyObject* self, PyObject *args)
@@ -763,7 +762,7 @@
return NULL;
refcnt_after = true_obj->ob_refcnt;
Py_DECREF(tup);
- fprintf(stderr, "REFCNT2 %i %i %i\\n", refcnt, refcnt_after,
+ fprintf(stderr, "REFCNT2 %zd %zd %zd\\n", refcnt, refcnt_after,
true_obj->ob_refcnt);
return PyBool_FromLong(refcnt_after == refcnt + 1 &&
refcnt == true_obj->ob_refcnt);
diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py
--- a/pypy/module/cpyext/test/test_longobject.py
+++ b/pypy/module/cpyext/test/test_longobject.py
@@ -171,7 +171,7 @@
int little_endian, is_signed;
if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed))
return NULL;
- return _PyLong_FromByteArray("\x9A\xBC", 2,
+ return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC", 2,
little_endian, is_signed);
"""),
])
@@ -187,7 +187,7 @@
int little_endian, is_signed;
if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed))
return NULL;
- return _PyLong_FromByteArray("\x9A\xBC\x41", 3,
+ return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC\x41", 3,
little_endian, is_signed);
"""),
])
diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
--- a/pypy/module/cpyext/test/test_pyerrors.py
+++ b/pypy/module/cpyext/test/test_pyerrors.py
@@ -168,14 +168,14 @@
PyErr_NormalizeException(&type, &val, &tb);
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
- if (val->ob_type != PyExc_TypeError)
+ if ((PyObject*)Py_TYPE(val) != PyExc_TypeError)
Py_RETURN_FALSE;
/* Normalize again */
PyErr_NormalizeException(&type, &val, &tb);
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
- if (val->ob_type != PyExc_TypeError)
+ if ((PyObject*)Py_TYPE(val) != PyExc_TypeError)
Py_RETURN_FALSE;
PyErr_Restore(type, val, tb);
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -437,14 +437,14 @@
("test_tp_getattro", "METH_VARARGS",
'''
PyObject *name, *obj = PyTuple_GET_ITEM(args, 0);
- PyIntObject *attr, *value = PyTuple_GET_ITEM(args, 1);
+ PyIntObject *attr, *value = (PyIntObject*) PyTuple_GET_ITEM(args, 1);
if (!obj->ob_type->tp_getattro)
{
PyErr_SetString(PyExc_ValueError, "missing tp_getattro");
return NULL;
}
name = PyString_FromString("attr1");
- attr = obj->ob_type->tp_getattro(obj, name);
+ attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name);
if (attr->ob_ival != value->ob_ival)
{
PyErr_SetString(PyExc_ValueError,
@@ -454,7 +454,7 @@
Py_DECREF(name);
Py_DECREF(attr);
name = PyString_FromString("attr2");
- attr = obj->ob_type->tp_getattro(obj, name);
+ attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name);
if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError))
{
PyErr_Clear();
@@ -758,8 +758,9 @@
} IntLikeObject;
static int
- intlike_nb_nonzero(IntLikeObject *v)
+ intlike_nb_nonzero(PyObject *o)
{
+ IntLikeObject *v = (IntLikeObject*)o;
if (v->value == -42) {
PyErr_SetNone(PyExc_ValueError);
return -1;
diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
--- a/pypy/tool/release/repackage.sh
+++ b/pypy/tool/release/repackage.sh
@@ -3,7 +3,7 @@
min=1
rev=1
branchname=release-$maj.x # ==OR== release-$maj.$min.x
-tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev
+tagname=release-$maj.$min.$rev # ==OR== release-$maj.$min
hg log -r $branchname || exit 1
hg log -r $tagname || exit 1
From pypy.commits at gmail.com Fri May 6 03:02:55 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 00:02:55 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix _hashlib
Message-ID: <572c419f.a423c20a.f9243.4ee9@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84247:ec939870c9dc
Date: 2016-05-06 09:02 +0200
http://bitbucket.org/pypy/pypy/changeset/ec939870c9dc/
Log: Fix _hashlib
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -65,7 +65,8 @@
# and use a custom lock only when needed.
self.lock = Lock(space)
- ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw')
+ ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw',
+ track_allocation=False)
rgc.add_memory_pressure(ropenssl.HASH_MALLOC_SIZE + self.digest_size)
try:
if copy_from:
@@ -74,13 +75,16 @@
ropenssl.EVP_DigestInit(ctx, digest_type)
self.ctx = ctx
except:
- lltype.free(ctx, flavor='raw')
+ lltype.free(ctx, flavor='raw', track_allocation=False)
raise
+ self.register_finalizer(space)
- def __del__(self):
- if self.ctx:
- ropenssl.EVP_MD_CTX_cleanup(self.ctx)
- lltype.free(self.ctx, flavor='raw')
+ def _finalize_(self):
+ ctx = self.ctx
+ if ctx:
+ self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO)
+ ropenssl.EVP_MD_CTX_cleanup(ctx)
+ lltype.free(ctx, flavor='raw', track_allocation=False)
def digest_type_by_name(self, space):
digest_type = ropenssl.EVP_get_digestbyname(self.name)
diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py
--- a/rpython/annotator/classdesc.py
+++ b/rpython/annotator/classdesc.py
@@ -584,8 +584,8 @@
not getattr(cls.__del__, '_must_be_light_finalizer_', False)):
raise AnnotatorError(
"Class %r is in a class hierarchy with "
- "_must_be_light_finalizer_ = True, but it has a "
- "destructor without @rgc.must_be_light_finalizer" % (cls,))
+ "_must_be_light_finalizer_ = True: it cannot have a "
+ "finalizer without @rgc.must_be_light_finalizer" % (cls,))
def add_source_attribute(self, name, value, mixin=False):
if isinstance(value, property):
From pypy.commits at gmail.com Fri May 6 03:21:26 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 00:21:26 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Revert the
"track_allocations=False" change. Instead, add logic so that
Message-ID: <572c45f6.2171c20a.e6371.6ac5@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84248:7f438ed57c13
Date: 2016-05-06 09:21 +0200
http://bitbucket.org/pypy/pypy/changeset/7f438ed57c13/
Log: Revert the "track_allocations=False" change. Instead, add logic so
that the leakfinder at the end of app-level tests tries not only to
call gc.collect(), but also to call the UserDelAction.
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -141,6 +141,12 @@
actionflag.action_dispatcher(self, frame) # slow path
bytecode_trace._always_inline_ = True
+ def _run_finalizers_now(self):
+ # Tests only: run the actions now, to ensure that the
+ # finalizable objects are really finalized. Used notably by
+ # pypy.tool.pytest.apptest.
+ self.space.actionflag.action_dispatcher(self, None)
+
def bytecode_only_trace(self, frame):
"""
Like bytecode_trace() but doesn't invoke any other events besides the
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -65,8 +65,7 @@
# and use a custom lock only when needed.
self.lock = Lock(space)
- ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw',
- track_allocation=False)
+ ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw')
rgc.add_memory_pressure(ropenssl.HASH_MALLOC_SIZE + self.digest_size)
try:
if copy_from:
@@ -75,7 +74,7 @@
ropenssl.EVP_DigestInit(ctx, digest_type)
self.ctx = ctx
except:
- lltype.free(ctx, flavor='raw', track_allocation=False)
+ lltype.free(ctx, flavor='raw')
raise
self.register_finalizer(space)
@@ -84,7 +83,7 @@
if ctx:
self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO)
ropenssl.EVP_MD_CTX_cleanup(ctx)
- lltype.free(ctx, flavor='raw', track_allocation=False)
+ lltype.free(ctx, flavor='raw')
def digest_type_by_name(self, space):
digest_type = ropenssl.EVP_get_digestbyname(self.name)
diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py
--- a/pypy/tool/pytest/apptest.py
+++ b/pypy/tool/pytest/apptest.py
@@ -7,7 +7,7 @@
# ...unless the -A option ('runappdirect') is passed.
import py
-import sys, textwrap, types
+import sys, textwrap, types, gc
from pypy.interpreter.gateway import app2interp_temp
from pypy.interpreter.error import OperationError
from pypy.interpreter.function import Method
@@ -32,6 +32,7 @@
return traceback
def execute_appex(self, space, target, *args):
+ self.space = space
try:
target(*args)
except OperationError as e:
@@ -64,6 +65,13 @@
code = getattr(func, 'im_func', func).func_code
return "[%s:%s]" % (code.co_filename, code.co_firstlineno)
+ def track_allocations_collect(self):
+ gc.collect()
+ # must also invoke finalizers now; UserDelAction
+ # would not run at all unless invoked explicitly
+ if hasattr(self, 'space'):
+ self.space.getexecutioncontext()._run_finalizers_now()
+
class AppTestMethod(AppTestFunction):
def setup(self):
diff --git a/rpython/conftest.py b/rpython/conftest.py
--- a/rpython/conftest.py
+++ b/rpython/conftest.py
@@ -82,7 +82,13 @@
return
if (not getattr(item.obj, 'dont_track_allocations', False)
and leakfinder.TRACK_ALLOCATIONS):
- item._pypytest_leaks = leakfinder.stop_tracking_allocations(False)
+ kwds = {}
+ try:
+ kwds['do_collection'] = item.track_allocations_collect
+ except AttributeError:
+ pass
+ item._pypytest_leaks = leakfinder.stop_tracking_allocations(False,
+ **kwds)
else: # stop_tracking_allocations() already called
item._pypytest_leaks = None
diff --git a/rpython/tool/leakfinder.py b/rpython/tool/leakfinder.py
--- a/rpython/tool/leakfinder.py
+++ b/rpython/tool/leakfinder.py
@@ -37,13 +37,13 @@
ALLOCATED.clear()
return result
-def stop_tracking_allocations(check, prev=None):
+def stop_tracking_allocations(check, prev=None, do_collection=gc.collect):
global TRACK_ALLOCATIONS
assert TRACK_ALLOCATIONS
for i in range(5):
if not ALLOCATED:
break
- gc.collect()
+ do_collection()
result = ALLOCATED.copy()
ALLOCATED.clear()
if prev is None:
From pypy.commits at gmail.com Fri May 6 03:22:14 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 00:22:14 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix zlib
Message-ID: <572c4626.08121c0a.1dacd.4d87@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84249:b075b2f93078
Date: 2016-05-06 09:22 +0200
http://bitbucket.org/pypy/pypy/changeset/b075b2f93078/
Log: Fix zlib
diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py
--- a/pypy/module/zlib/interp_zlib.py
+++ b/pypy/module/zlib/interp_zlib.py
@@ -148,8 +148,9 @@
raise zlib_error(space, e.msg)
except ValueError:
raise oefmt(space.w_ValueError, "Invalid initialization option")
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
"""Automatically free the resources used by the stream."""
if self.stream:
rzlib.deflateEnd(self.stream)
@@ -258,8 +259,9 @@
raise zlib_error(space, e.msg)
except ValueError:
raise oefmt(space.w_ValueError, "Invalid initialization option")
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
"""Automatically free the resources used by the stream."""
if self.stream:
rzlib.inflateEnd(self.stream)
From pypy.commits at gmail.com Fri May 6 03:30:11 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 00:30:11 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix bz2
Message-ID: <572c4803.82bb1c0a.c77bb.472e@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84250:254752b4b3fb
Date: 2016-05-06 09:30 +0200
http://bitbucket.org/pypy/pypy/changeset/254752b4b3fb/
Log: Fix bz2
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -518,8 +518,14 @@
def __init__(self, space, compresslevel):
self.space = space
self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True)
- self.running = False
- self._init_bz2comp(compresslevel)
+ try:
+ self.running = False
+ self._init_bz2comp(compresslevel)
+ except:
+ lltype.free(self.bzs, flavor='raw')
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ raise
+ self.register_finalizer(space)
def _init_bz2comp(self, compresslevel):
if compresslevel < 1 or compresslevel > 9:
@@ -532,9 +538,12 @@
self.running = True
- def __del__(self):
- BZ2_bzCompressEnd(self.bzs)
- lltype.free(self.bzs, flavor='raw')
+ def _finalize_(self):
+ bzs = self.bzs
+ if bzs:
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ BZ2_bzCompressEnd(bzs)
+ lltype.free(bzs, flavor='raw')
@unwrap_spec(data='bufferstr')
def compress(self, data):
@@ -621,10 +630,16 @@
self.space = space
self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True)
- self.running = False
- self.unused_data = ""
+ try:
+ self.running = False
+ self.unused_data = ""
- self._init_bz2decomp()
+ self._init_bz2decomp()
+ except:
+ lltype.free(self.bzs, flavor='raw')
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ raise
+ self.register_finalizer(space)
def _init_bz2decomp(self):
bzerror = BZ2_bzDecompressInit(self.bzs, 0, 0)
@@ -633,9 +648,12 @@
self.running = True
- def __del__(self):
- BZ2_bzDecompressEnd(self.bzs)
- lltype.free(self.bzs, flavor='raw')
+ def _finalize_(self):
+ bzs = self.bzs
+ if bzs:
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ BZ2_bzDecompressEnd(bzs)
+ lltype.free(bzs, flavor='raw')
@unwrap_spec(data='bufferstr')
def decompress(self, data):
diff --git a/pypy/module/bz2/test/support.py b/pypy/module/bz2/test/support.py
--- a/pypy/module/bz2/test/support.py
+++ b/pypy/module/bz2/test/support.py
@@ -10,5 +10,6 @@
#
while tries and ll2ctypes.ALLOCATED:
gc.collect() # to make sure we deallocate buffers
+ self.space.getexecutioncontext()._run_finalizers_now()
tries -= 1
assert not ll2ctypes.ALLOCATED
From pypy.commits at gmail.com Fri May 6 03:31:46 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 00:31:46 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix _multibytecodec
Message-ID: <572c4862.8344c20a.2d101.6bfa@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84251:c410845de2c6
Date: 2016-05-06 09:32 +0200
http://bitbucket.org/pypy/pypy/changeset/c410845de2c6/
Log: Fix _multibytecodec
diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py
--- a/pypy/module/_multibytecodec/interp_incremental.py
+++ b/pypy/module/_multibytecodec/interp_incremental.py
@@ -20,8 +20,9 @@
self.codec = codec.codec
self.name = codec.name
self._initialize()
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
self._free()
def reset_w(self):
From pypy.commits at gmail.com Fri May 6 03:52:27 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 00:52:27 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix pyexpat
Message-ID: <572c4d3b.06921c0a.1e1d5.53c3@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84252:7946e940e452
Date: 2016-05-06 09:52 +0200
http://bitbucket.org/pypy/pypy/changeset/7946e940e452/
Log: Fix pyexpat
diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py
--- a/pypy/module/pyexpat/interp_pyexpat.py
+++ b/pypy/module/pyexpat/interp_pyexpat.py
@@ -421,8 +421,11 @@
class W_XMLParserType(W_Root):
+ id = -1
+
def __init__(self, space, parser, w_intern):
self.itself = parser
+ self.register_finalizer(space)
self.w_intern = w_intern
@@ -444,14 +447,17 @@
CallbackData(space, self))
XML_SetUserData(self.itself, rffi.cast(rffi.VOIDP, self.id))
- def __del__(self):
+ def _finalize_(self):
if XML_ParserFree: # careful with CPython interpreter shutdown
- XML_ParserFree(self.itself)
- if global_storage:
+ if self.itself:
+ XML_ParserFree(self.itself)
+ self.itself = lltype.nullptr(XML_Parser.TO)
+ if global_storage and self.id >= 0:
try:
global_storage.free_nonmoving_id(self.id)
except KeyError:
pass # maybe global_storage.clear() was already called
+ self.id = -1
@unwrap_spec(flag=int)
def SetParamEntityParsing(self, space, flag):
From pypy.commits at gmail.com Fri May 6 03:57:16 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 00:57:16 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix select
Message-ID: <572c4e5c.923f1c0a.5b0e0.4cee@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84253:6f05a5828b98
Date: 2016-05-06 09:57 +0200
http://bitbucket.org/pypy/pypy/changeset/6f05a5828b98/
Log: Fix select
diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py
--- a/pypy/module/select/interp_epoll.py
+++ b/pypy/module/select/interp_epoll.py
@@ -76,6 +76,7 @@
class W_Epoll(W_Root):
def __init__(self, space, epfd):
self.epfd = epfd
+ self.register_finalizer(space)
@unwrap_spec(sizehint=int)
def descr__new__(space, w_subtype, sizehint=-1):
@@ -94,7 +95,7 @@
def descr_fromfd(space, w_cls, fd):
return space.wrap(W_Epoll(space, fd))
- def __del__(self):
+ def _finalize_(self):
self.close()
def check_closed(self, space):
diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py
--- a/pypy/module/select/interp_kqueue.py
+++ b/pypy/module/select/interp_kqueue.py
@@ -109,6 +109,7 @@
class W_Kqueue(W_Root):
def __init__(self, space, kqfd):
self.kqfd = kqfd
+ self.register_finalizer(space)
def descr__new__(space, w_subtype):
kqfd = syscall_kqueue()
@@ -120,7 +121,7 @@
def descr_fromfd(space, w_cls, fd):
return space.wrap(W_Kqueue(space, fd))
- def __del__(self):
+ def _finalize_(self):
self.close()
def get_closed(self):
From pypy.commits at gmail.com Fri May 6 04:04:10 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 01:04:10 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix micronumpy
Message-ID: <572c4ffa.d1981c0a.729a8.5820@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84254:227d2de10882
Date: 2016-05-06 10:04 +0200
http://bitbucket.org/pypy/pypy/changeset/227d2de10882/
Log: Fix micronumpy
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -3,7 +3,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from pypy.interpreter.argument import Arguments
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
from rpython.rlib.rarithmetic import LONG_BIT, maxint, _get_bitsize
from rpython.tool.sourcetools import func_with_new_name
from rpython.rlib.rawstorage import (
@@ -1534,6 +1534,7 @@
self.steps = alloc_raw_storage(0, track_allocation=False)
self.dims_steps_set = False
+ @rgc.must_be_light_finalizer
def __del__(self):
free_raw_storage(self.dims, track_allocation=False)
free_raw_storage(self.steps, track_allocation=False)
From pypy.commits at gmail.com Fri May 6 04:18:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 01:18:42 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix _multiprocessing
Message-ID: <572c5362.d2711c0a.9e252.5723@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84255:96181861aba3
Date: 2016-05-06 10:18 +0200
http://bitbucket.org/pypy/pypy/changeset/96181861aba3/
Log: Fix _multiprocessing
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -40,14 +40,17 @@
BUFFER_SIZE = 1024
buffer = lltype.nullptr(rffi.CCHARP.TO)
- def __init__(self, flags):
+ def __init__(self, space, flags):
self.flags = flags
self.buffer = lltype.malloc(rffi.CCHARP.TO, self.BUFFER_SIZE,
flavor='raw')
+ self.register_finalizer(space)
- def __del__(self):
- if self.buffer:
- lltype.free(self.buffer, flavor='raw')
+ def _finalize_(self):
+ buf = self.buffer
+ if buf:
+ self.buffer = lltype.nullptr(rffi.CCHARP.TO)
+ lltype.free(buf, flavor='raw')
try:
self.do_close()
except OSError:
@@ -242,7 +245,7 @@
def __init__(self, space, fd, flags):
if fd == self.INVALID_HANDLE_VALUE or fd < 0:
raise oefmt(space.w_IOError, "invalid handle %d", fd)
- W_BaseConnection.__init__(self, flags)
+ W_BaseConnection.__init__(self, space, flags)
self.fd = fd
@unwrap_spec(fd=int, readable=bool, writable=bool)
@@ -363,8 +366,8 @@
if sys.platform == 'win32':
from rpython.rlib.rwin32 import INVALID_HANDLE_VALUE
- def __init__(self, handle, flags):
- W_BaseConnection.__init__(self, flags)
+ def __init__(self, space, handle, flags):
+ W_BaseConnection.__init__(self, space, flags)
self.handle = handle
@unwrap_spec(readable=bool, writable=bool)
@@ -375,7 +378,7 @@
flags = (readable and READABLE) | (writable and WRITABLE)
self = space.allocate_instance(W_PipeConnection, w_subtype)
- W_PipeConnection.__init__(self, handle, flags)
+ W_PipeConnection.__init__(self, space, handle, flags)
return space.wrap(self)
def descr_repr(self, space):
diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py
--- a/pypy/module/_multiprocessing/interp_semaphore.py
+++ b/pypy/module/_multiprocessing/interp_semaphore.py
@@ -430,11 +430,12 @@
class W_SemLock(W_Root):
- def __init__(self, handle, kind, maxvalue):
+ def __init__(self, space, handle, kind, maxvalue):
self.handle = handle
self.kind = kind
self.count = 0
self.maxvalue = maxvalue
+ self.register_finalizer(space)
def kind_get(self, space):
return space.newint(self.kind)
@@ -508,7 +509,7 @@
@unwrap_spec(kind=int, maxvalue=int)
def rebuild(space, w_cls, w_handle, kind, maxvalue):
self = space.allocate_instance(W_SemLock, w_cls)
- self.__init__(handle_w(space, w_handle), kind, maxvalue)
+ self.__init__(space, handle_w(space, w_handle), kind, maxvalue)
return space.wrap(self)
def enter(self, space):
@@ -517,7 +518,7 @@
def exit(self, space, __args__):
self.release(space)
- def __del__(self):
+ def _finalize_(self):
delete_semaphore(self.handle)
@unwrap_spec(kind=int, value=int, maxvalue=int)
@@ -534,7 +535,7 @@
raise wrap_oserror(space, e)
self = space.allocate_instance(W_SemLock, w_subtype)
- self.__init__(handle, kind, maxvalue)
+ self.__init__(space, handle, kind, maxvalue)
return space.wrap(self)
From pypy.commits at gmail.com Fri May 6 04:23:45 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 01:23:45 -0700 (PDT)
Subject: [pypy-commit] pypy default: Merged in unpacking-cpython-shortcut
(pull request #443)
Message-ID: <572c5491.442cc20a.f07ae.7be8@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84256:e98228f40d1f
Date: 2016-05-06 10:23 +0200
http://bitbucket.org/pypy/pypy/changeset/e98228f40d1f/
Log: Merged in unpacking-cpython-shortcut (pull request #443)
Copy CPython's 'optimization': ignore __iter__ etc. for
f(**dict_subclass())
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -688,3 +688,21 @@
def f(x): pass
e = raises(TypeError, "f(**{u'ü' : 19})")
assert "?" in str(e.value)
+
+ def test_starstarargs_dict_subclass(self):
+ def f(**kwargs):
+ return kwargs
+ class DictSubclass(dict):
+ def __iter__(self):
+ yield 'x'
+ # CPython, as an optimization, looks directly into dict internals when
+ # passing one via **kwargs.
+ x =DictSubclass()
+ assert f(**x) == {}
+ x['a'] = 1
+ assert f(**x) == {'a': 1}
+
+ def test_starstarargs_module_dict(self):
+ def f(**kwargs):
+ return kwargs
+ assert f(**globals()) == globals()
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -483,7 +483,7 @@
return None
def view_as_kwargs(self, w_dict):
- if type(w_dict) is W_DictObject:
+ if isinstance(w_dict, W_DictObject):
return w_dict.view_as_kwargs()
return (None, None)
From pypy.commits at gmail.com Fri May 6 04:26:53 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 01:26:53 -0700 (PDT)
Subject: [pypy-commit] pypy default: comments
Message-ID: <572c554d.0f801c0a.8f688.5cfe@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84257:78a9d921802c
Date: 2016-05-06 10:27 +0200
http://bitbucket.org/pypy/pypy/changeset/78a9d921802c/
Log: comments
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -77,3 +77,5 @@
Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-order.rst.
It is a more flexible way to make RPython finalizers.
+
+.. branch: unpacking-cpython-shortcut
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -483,6 +483,11 @@
return None
def view_as_kwargs(self, w_dict):
+ # Tries to return (keys_list, values_list), or (None, None) if
+ # it fails. It can fail on some dict implementations, so don't
+ # rely on it. For dict subclasses, though, it never fails;
+ # this emulates CPython's behavior which often won't call
+ # custom __iter__() or keys() methods in dict subclasses.
if isinstance(w_dict, W_DictObject):
return w_dict.view_as_kwargs()
return (None, None)
From pypy.commits at gmail.com Fri May 6 05:08:32 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 02:08:32 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: No point in caching in typedef.py
a subclass per space. The space is
Message-ID: <572c5f10.47afc20a.a55a6.ffff8b4f@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84258:807ed074327d
Date: 2016-05-06 11:08 +0200
http://bitbucket.org/pypy/pypy/changeset/807ed074327d/
Log: No point in caching in typedef.py a subclass per space. The space is
not used any more. So we might as well cache a single global subclass.
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -127,10 +127,7 @@
""" % (slots, methodname, checks[0], checks[1],
checks[2], checks[3]))
subclasses = {}
- for key, subcls in typedef._subclass_cache.items():
- if key[0] is not space.config:
- continue
- cls = key[1]
+ for cls, subcls in typedef._unique_subclass_cache.items():
subclasses.setdefault(cls, {})
prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls)
assert subcls is prevsubcls
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -108,18 +108,17 @@
def get_unique_interplevel_subclass(space, cls):
"NOT_RPYTHON: initialization-time only"
assert cls.typedef.acceptable_as_base_class
- key = space, cls
try:
- return _subclass_cache[key]
+ return _unique_subclass_cache[cls]
except KeyError:
- subcls = _getusercls(space, cls)
- assert key not in _subclass_cache
- _subclass_cache[key] = subcls
+ subcls = _getusercls(cls)
+ assert cls not in _unique_subclass_cache
+ _unique_subclass_cache[cls] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
-_subclass_cache = {}
+_unique_subclass_cache = {}
-def _getusercls(space, cls, reallywantdict=False):
+def _getusercls(cls, reallywantdict=False):
from rpython.rlib import objectmodel
from pypy.objspace.std.objectobject import W_ObjectObject
from pypy.module.__builtin__.interp_classobj import W_InstanceObject
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -187,7 +187,7 @@
return
self.InstanceObjectCls = _getusercls(
- space, W_InstanceObject, reallywantdict=True)
+ W_InstanceObject, reallywantdict=True)
def class_descr_call(space, w_self, __args__):
From pypy.commits at gmail.com Fri May 6 06:11:37 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 03:11:37 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Restore semblance of sanity to
gc.disable()/gc.enable(): now they
Message-ID: <572c6dd9.4ea81c0a.2c7ec.ffff8b15@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84259:88006cd809de
Date: 2016-05-06 12:10 +0200
http://bitbucket.org/pypy/pypy/changeset/88006cd809de/
Log: Restore semblance of sanity to gc.disable()/gc.enable(): now they
don't usually prevent running finalizers, but they will prevent some
explicitly-defined ones from running:
* all user __del__
* weakref callbacks
They don't prevent RPython-level things like closing files, and
also, by default, they don't prevent calls in other situations, like
cffi's 'ffi.gc()'.
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1856,7 +1856,6 @@
('get', 'get', 3, ['__get__']),
('set', 'set', 3, ['__set__']),
('delete', 'delete', 2, ['__delete__']),
- ('userdel', 'del', 1, ['__del__']),
]
ObjSpace.BuiltinModuleTable = [
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -533,32 +533,56 @@
AsyncAction.__init__(self, space)
self.finalizers_lock_count = 0 # see pypy/module/gc
self.enabled_at_app_level = True # see pypy/module/gc
+ self.pending_with_disabled_del = None
def perform(self, executioncontext, frame):
- if self.finalizers_lock_count > 0:
- return
self._run_finalizers()
+ @jit.dont_look_inside
def _run_finalizers(self):
while True:
w_obj = self.space.finalizer_queue.next_dead()
if w_obj is None:
break
+ self._call_finalizer(w_obj)
- # Before calling the finalizers, clear the weakrefs, if any.
- w_obj.clear_all_weakrefs()
+ def gc_disabled(self, w_obj):
+ # If we're running in 'gc.disable()' mode, record w_obj in the
+ # "call me later" list and return True. Use this function
+ # from _finalize_() methods that would call app-level some
+ # things that we consider shouldn't be called in gc.disable().
+ # (The exact definition is of course a bit vague, but most
+ # importantly this includes all user-level __del__().)
+ pdd = self.pending_with_disabled_del
+ if pdd is None:
+ return False
+ else:
+ pdd.append(w_obj)
+ return True
- # Look up and call the app-level __del__, if any.
+ def _call_finalizer(self, w_obj):
+ # Before calling the finalizers, clear the weakrefs, if any.
+ w_obj.clear_all_weakrefs()
+
+ # Look up and call the app-level __del__, if any.
+ space = self.space
+ if w_obj.typedef is None:
+ w_del = None # obscure case: for WeakrefLifeline
+ else:
+ w_del = space.lookup(w_obj, '__del__')
+ if w_del is not None:
+ if self.gc_disabled(w_obj):
+ return
try:
- self.space.userdel(w_obj)
+ space.get_and_call_function(w_del, w_obj)
except Exception as e:
- report_error(self.space, e, "method __del__ of ", w_obj)
+ report_error(space, e, "method __del__ of ", w_obj)
- # Call the RPython-level _finalize_() method.
- try:
- w_obj._finalize_()
- except Exception as e:
- report_error(self.space, e, "finalizer of ", w_obj)
+ # Call the RPython-level _finalize_() method.
+ try:
+ w_obj._finalize_()
+ except Exception as e:
+ report_error(space, e, "finalizer of ", w_obj)
def report_error(space, e, where, w_obj):
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -650,6 +650,8 @@
if w_func is None:
w_func = self.getattr_from_class(space, '__del__')
if w_func is not None:
+ if self.space.user_del_action.gc_disabled(self):
+ return
space.call_function(w_func)
def descr_exit(self, space, w_type, w_value, w_tb):
diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py
--- a/pypy/module/_weakref/interp__weakref.py
+++ b/pypy/module/_weakref/interp__weakref.py
@@ -17,6 +17,8 @@
class WeakrefLifeline(W_Root):
+ typedef = None
+
cached_weakref = None
cached_proxy = None
other_refs_weak = None
@@ -103,8 +105,7 @@
def enable_callbacks(self):
if not self.has_callbacks:
- fq = self.space.fromcache(Cache).fq
- fq.register_finalizer(self)
+ self.register_finalizer(self.space)
self.has_callbacks = True
@jit.dont_look_inside
@@ -127,39 +128,28 @@
self.enable_callbacks()
return w_proxy
-
-class WeakrefCallbackAction(AsyncAction):
- """An action that runs when a W_Root object goes away, and allows
- its lifeline to go away. It activates all the callbacks of all
- the dying lifelines.
- """
-
- def perform(self, executioncontext, frame):
- fq = self.space.fromcache(Cache).fq
- while True:
- lifeline = fq.next_dead()
- if lifeline is None:
- break
- if lifeline.other_refs_weak is None:
- continue # should never be the case, but better safe than sorry
- items = lifeline.other_refs_weak.items()
- for i in range(len(items)-1, -1, -1):
- w_ref = items[i]()
- if w_ref is not None and w_ref.w_callable is not None:
- try:
- w_ref.activate_callback()
- except Exception as e:
- report_error(self.space, e,
- "weakref callback ", w_ref.w_callable)
-
-class Cache:
- def __init__(self, space):
- class WeakrefFinalizerQueue(rgc.FinalizerQueue):
- Class = WeakrefLifeline
- def finalizer_trigger(self):
- space.weakref_callback_action.fire()
- space.weakref_callback_action = WeakrefCallbackAction(space)
- self.fq = WeakrefFinalizerQueue()
+ def _finalize_(self):
+ """This is called at the end, if enable_callbacks() was invoked.
+ It activates the callbacks.
+ """
+ if self.other_refs_weak is None:
+ return
+ #
+ # If this is set, then we're in the 'gc.disable()' mode. In that
+ # case, don't invoke the callbacks now.
+ if self.space.user_del_action.gc_disabled(self):
+ return
+ #
+ items = self.other_refs_weak.items()
+ self.other_refs_weak = None
+ for i in range(len(items)-1, -1, -1):
+ w_ref = items[i]()
+ if w_ref is not None and w_ref.w_callable is not None:
+ try:
+ w_ref.activate_callback()
+ except Exception as e:
+ report_error(self.space, e,
+ "weakref callback ", w_ref.w_callable)
# ____________________________________________________________
@@ -339,7 +329,7 @@
proxy_typedef_dict = {}
callable_proxy_typedef_dict = {}
-special_ops = {'repr': True, 'userdel': True, 'hash': True}
+special_ops = {'repr': True, 'hash': True}
for opname, _, arity, special_methods in ObjSpace.MethodTable:
if opname in special_ops or not special_methods:
diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py
--- a/pypy/module/gc/interp_gc.py
+++ b/pypy/module/gc/interp_gc.py
@@ -38,13 +38,23 @@
return space.newbool(space.user_del_action.enabled_at_app_level)
def enable_finalizers(space):
- if space.user_del_action.finalizers_lock_count == 0:
+ uda = space.user_del_action
+ if uda.finalizers_lock_count == 0:
raise oefmt(space.w_ValueError, "finalizers are already enabled")
- space.user_del_action.finalizers_lock_count -= 1
- space.user_del_action.fire()
+ uda.finalizers_lock_count -= 1
+ if uda.finalizers_lock_count == 0:
+ pending = uda.pending_with_disabled_del
+ uda.pending_with_disabled_del = None
+ if pending is not None:
+ for i in range(len(pending)):
+ uda._call_finalizer(pending[i])
+ pending[i] = None # clear the list as we progress
def disable_finalizers(space):
- space.user_del_action.finalizers_lock_count += 1
+ uda = space.user_del_action
+ uda.finalizers_lock_count += 1
+ if uda.pending_with_disabled_del is None:
+ uda.pending_with_disabled_del = []
# ____________________________________________________________
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -440,11 +440,6 @@
raise oefmt(space.w_TypeError,
"__hash__() should return an int or long")
- def userdel(space, w_obj):
- w_del = space.lookup(w_obj, '__del__')
- if w_del is not None:
- space.get_and_call_function(w_del, w_obj)
-
def cmp(space, w_v, w_w):
if space.is_w(w_v, w_w):
From pypy.commits at gmail.com Fri May 6 07:17:03 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 04:17:03 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Some jit.dont_look_inside.
Message-ID: <572c7d2f.47afc20a.a55a6.ffffc22d@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84260:c2080e0f969f
Date: 2016-05-06 12:23 +0100
http://bitbucket.org/pypy/pypy/changeset/c2080e0f969f/
Log: Some jit.dont_look_inside.
diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py
--- a/pypy/module/_weakref/interp__weakref.py
+++ b/pypy/module/_weakref/interp__weakref.py
@@ -105,7 +105,7 @@
def enable_callbacks(self):
if not self.has_callbacks:
- self.register_finalizer(self.space)
+ self.space.finalizer_queue.register_finalizer(self)
self.has_callbacks = True
@jit.dont_look_inside
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -393,6 +393,7 @@
return True
@specialize.arg(0)
+ @jit.dont_look_inside
def next_dead(self):
if we_are_translated():
from rpython.rtyper.lltypesystem.lloperation import llop
@@ -407,6 +408,7 @@
return None
@specialize.arg(0)
+ @jit.dont_look_inside
def register_finalizer(self, obj):
assert isinstance(obj, self.Class)
if we_are_translated():
From pypy.commits at gmail.com Fri May 6 07:34:24 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 04:34:24 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: update doc
Message-ID: <572c8140.0c2e1c0a.d4e63.ffffaea7@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84261:3b705156974d
Date: 2016-05-06 13:34 +0200
http://bitbucket.org/pypy/pypy/changeset/3b705156974d/
Log: update doc
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -95,10 +95,12 @@
To find the queued items, call ``fin.next_dead()`` repeatedly. It
returns the next queued item, or ``None`` when the queue is empty.
-It is allowed in theory to cumulate several different
+In theory, it would kind of work if you cumulate several different
``FinalizerQueue`` instances for objects of the same class, and
(always in theory) the same ``obj`` could be registered several times
in the same queue, or in several queues. This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
Ordering of finalizers
From pypy.commits at gmail.com Fri May 6 07:52:51 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 04:52:51 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Ignore the register_finalizer()
calls on top of Boehm
Message-ID: <572c8593.878d1c0a.59e9c.ffffb7bf@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84262:070d35dbb069
Date: 2016-05-06 13:52 +0200
http://bitbucket.org/pypy/pypy/changeset/070d35dbb069/
Log: Ignore the register_finalizer() calls on top of Boehm
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -102,6 +102,9 @@
For now the untranslated emulation does not support registering the
same object several times.
+Note that the Boehm garbage collector, used in ``rpython -O0``,
+completely ignores ``register_finalizer()``.
+
Ordering of finalizers
----------------------
diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h
--- a/rpython/translator/c/src/mem.h
+++ b/rpython/translator/c/src/mem.h
@@ -109,6 +109,9 @@
#define OP_GC__ENABLE_FINALIZERS(r) (boehm_gc_finalizer_lock--, \
boehm_gc_finalizer_notifier())
+#define OP_GC_FQ_REGISTER(tag, obj, r) /* ignored so far */
+#define OP_GC_FQ_NEXT_DEAD(tag, r) (r = NULL)
+
#endif /* PYPY_USING_BOEHM_GC */
@@ -121,6 +124,8 @@
#define GC_REGISTER_FINALIZER(a, b, c, d, e) /* nothing */
#define GC_gcollect() /* nothing */
#define GC_set_max_heap_size(a) /* nothing */
+#define OP_GC_FQ_REGISTER(tag, obj, r) /* nothing */
+#define OP_GC_FQ_NEXT_DEAD(tag, r) (r = NULL)
#endif
/************************************************************/
diff --git a/rpython/translator/c/test/test_boehm.py b/rpython/translator/c/test/test_boehm.py
--- a/rpython/translator/c/test/test_boehm.py
+++ b/rpython/translator/c/test/test_boehm.py
@@ -2,7 +2,7 @@
import py
-from rpython.rlib import rgc
+from rpython.rlib import rgc, debug
from rpython.rlib.objectmodel import (keepalive_until_here, compute_unique_id,
compute_hash, current_object_addr_as_int)
from rpython.rtyper.lltypesystem import lltype, llmemory
@@ -392,3 +392,23 @@
assert res[2] != compute_hash(c) # likely
assert res[3] == compute_hash(d)
assert res[4] == compute_hash(("Hi", None, (7.5, 2, d)))
+
+ def test_finalizer_queue_is_at_least_ignored(self):
+ class A(object):
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ debug.debug_print("hello!") # not called so far
+ fq = FQ()
+ #
+ def fn():
+ fq.register_finalizer(A())
+ rgc.collect()
+ rgc.collect()
+ fq.next_dead()
+ return 42
+
+ f = self.getcompiled(fn)
+ res = f()
+ assert res == 42
From pypy.commits at gmail.com Fri May 6 09:07:03 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Fri, 06 May 2016 06:07:03 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Add astbuilder test for matmul
Message-ID: <572c96f7.aaf0c20a.6e68f.fffff272@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r84263:4b92de1eb94a
Date: 2016-05-06 15:06 +0200
http://bitbucket.org/pypy/pypy/changeset/4b92de1eb94a/
Log: Add astbuilder test for matmul
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -906,7 +906,8 @@
("/", ast.Div),
("*", ast.Mult),
("//", ast.FloorDiv),
- ("%", ast.Mod)
+ ("%", ast.Mod),
+ ("@", ast.MatMul)
)
for op, ast_type in binops:
bin = self.get_first_expr("a %s b" % (op,))
From pypy.commits at gmail.com Fri May 6 09:14:20 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 06:14:20 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Can't cumulate calls to
register_finalizer()
Message-ID: <572c98ac.4106c20a.e4ae0.31b6@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84264:4d9ac4622f35
Date: 2016-05-06 15:14 +0200
http://bitbucket.org/pypy/pypy/changeset/4d9ac4622f35/
Log: Can't cumulate calls to register_finalizer()
diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py
--- a/rpython/rlib/test/test_rgc.py
+++ b/rpython/rlib/test/test_rgc.py
@@ -327,8 +327,6 @@
fq = SimpleFQ()
w = T_Del2(42)
fq.register_finalizer(w)
- fq.register_finalizer(w)
- fq.register_finalizer(w)
del w
fq.register_finalizer(T_Del1(21))
gc.collect(); gc.collect()
From pypy.commits at gmail.com Fri May 6 09:17:07 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 06:17:07 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Fix test
Message-ID: <572c9953.4ac0c20a.3edef.3fe0@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84265:199051ab596e
Date: 2016-05-06 15:17 +0200
http://bitbucket.org/pypy/pypy/changeset/199051ab596e/
Log: Fix test
diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
--- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
@@ -28,10 +28,10 @@
p65 = getfield_gc_r(p14, descr=)
guard_value(p65, ConstPtr(ptr45), descr=...)
p66 = getfield_gc_r(p14, descr=)
- guard_nonnull_class(p66, ..., descr=...)
+ guard_nonnull(p66, descr=...)
p67 = force_token()
setfield_gc(p0, p67, descr=)
- p68 = call_may_force_r(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=)
+ p68 = call_may_force_r(ConstClass(WeakrefLifeline.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
guard_nonnull_class(p68, ..., descr=...)
From pypy.commits at gmail.com Fri May 6 10:20:16 2016
From: pypy.commits at gmail.com (marky1991)
Date: Fri, 06 May 2016 07:20:16 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Merged in marky1991/pypy_new/py3k (pull
request #444)
Message-ID: <572ca820.d81a1c0a.33af1.fffffd95@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r84268:5624ae62ac73
Date: 2016-05-06 10:19 -0400
http://bitbucket.org/pypy/pypy/changeset/5624ae62ac73/
Log: Merged in marky1991/pypy_new/py3k (pull request #444)
Py3k Finish Deque Fix
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -149,7 +149,7 @@
RegrTest('test_codecmaps_jp.py', usemodules='_multibytecodec'),
RegrTest('test_codecmaps_kr.py', usemodules='_multibytecodec'),
RegrTest('test_codecmaps_tw.py', usemodules='_multibytecodec'),
- RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'),
+ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec struct unicodedata array'),
RegrTest('test_codeop.py', core=True),
RegrTest('test_coding.py', core=True),
RegrTest('test_collections.py', usemodules='binascii struct'),
@@ -179,7 +179,7 @@
RegrTest('test_decimal.py'),
RegrTest('test_decorators.py', core=True),
RegrTest('test_defaultdict.py', usemodules='_collections'),
- RegrTest('test_deque.py', core=True, usemodules='_collections'),
+ RegrTest('test_deque.py', core=True, usemodules='_collections struct'),
RegrTest('test_descr.py', core=True, usemodules='_weakref'),
RegrTest('test_descrtut.py', core=True),
RegrTest('test_devpoll.py'),
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -533,7 +533,16 @@
return self.space.newtuple([self.space.gettypefor(W_DequeIter),
self.space.newtuple([self.deque])])
+def W_DequeIter__new__(space, w_subtype, w_deque):
+ w_self = space.allocate_instance(W_DequeIter, w_subtype)
+ if not isinstance(w_deque, W_Deque):
+ raise oefmt(space.w_TypeError, "must be collections.deque, not %T", w_deque)
+
+ W_DequeIter.__init__(space.interp_w(W_DequeIter, w_self), w_deque)
+ return w_self
+
W_DequeIter.typedef = TypeDef("_collections.deque_iterator",
+ __new__ = interp2app(W_DequeIter__new__),
__iter__ = interp2app(W_DequeIter.iter),
__length_hint__ = interp2app(W_DequeIter.length),
__next__ = interp2app(W_DequeIter.next),
@@ -576,10 +585,24 @@
self.index = ri
return w_x
+ def reduce(self):
+ return self.space.newtuple([self.space.gettypefor(W_DequeRevIter),
+ self.space.newtuple([self.deque])])
+
+def W_DequeRevIter__new__(space, w_subtype, w_deque):
+ w_self = space.allocate_instance(W_DequeRevIter, w_subtype)
+ if not isinstance(w_deque, W_Deque):
+ raise oefmt(space.w_TypeError, "must be collections.deque, not %T", w_deque)
+
+ W_DequeRevIter.__init__(space.interp_w(W_DequeRevIter, w_self), w_deque)
+ return w_self
+
W_DequeRevIter.typedef = TypeDef("_collections.deque_reverse_iterator",
+ __new__ = interp2app(W_DequeRevIter__new__),
__iter__ = interp2app(W_DequeRevIter.iter),
__length_hint__ = interp2app(W_DequeRevIter.length),
__next__ = interp2app(W_DequeRevIter.next),
+ __reduce__ = interp2app(W_DequeRevIter.reduce)
)
W_DequeRevIter.typedef.acceptable_as_base_class = False
diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py
--- a/pypy/module/_collections/test/test_deque.py
+++ b/pypy/module/_collections/test/test_deque.py
@@ -1,6 +1,6 @@
class AppTestBasic:
- spaceconfig = dict(usemodules=['_collections'])
+ spaceconfig = dict(usemodules=['_collections', 'struct'])
def test_basics(self):
from _collections import deque
@@ -301,3 +301,19 @@
d.pop()
gc.collect(); gc.collect(); gc.collect()
assert X.freed
+
+ def test_DequeIter_pickle(self):
+ from _collections import deque
+ import pickle
+ d = deque([1,2,3])
+ iterator = iter(d)
+ copy = pickle.loads(pickle.dumps(iterator))
+ assert list(iterator) == list(copy)
+
+ def test_DequeRevIter_pickle(self):
+ from _collections import deque
+ import pickle
+ d = deque([1,2,3])
+ iterator = reversed(d)
+ copy = pickle.loads(pickle.dumps(iterator))
+ assert list(iterator) == list(copy)
From pypy.commits at gmail.com Fri May 6 10:20:43 2016
From: pypy.commits at gmail.com (marky1991)
Date: Fri, 06 May 2016 07:20:43 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Fix the message to report the type
correctly.
Message-ID: <572ca83b.a423c20a.f9243.07ab@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r84267:1248da245a30
Date: 2016-05-06 10:04 -0400
http://bitbucket.org/pypy/pypy/changeset/1248da245a30/
Log: Fix the message to report the type correctly.
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -536,7 +536,7 @@
def W_DequeIter__new__(space, w_subtype, w_deque):
w_self = space.allocate_instance(W_DequeIter, w_subtype)
if not isinstance(w_deque, W_Deque):
- raise oefmt(space.w_TypeError, "must be collections.deque, not %T")
+ raise oefmt(space.w_TypeError, "must be collections.deque, not %T", w_deque)
W_DequeIter.__init__(space.interp_w(W_DequeIter, w_self), w_deque)
return w_self
@@ -592,7 +592,7 @@
def W_DequeRevIter__new__(space, w_subtype, w_deque):
w_self = space.allocate_instance(W_DequeRevIter, w_subtype)
if not isinstance(w_deque, W_Deque):
- raise oefmt(space.w_TypeError, "must be collections.deque, not %T")
+ raise oefmt(space.w_TypeError, "must be collections.deque, not %T", w_deque)
W_DequeRevIter.__init__(space.interp_w(W_DequeRevIter, w_self), w_deque)
return w_self
From pypy.commits at gmail.com Fri May 6 10:20:41 2016
From: pypy.commits at gmail.com (marky1991)
Date: Fri, 06 May 2016 07:20:41 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Fix deque as I meant to do before. Added
tests specifically testing pickleability of deque iterators.
Message-ID: <572ca839.4ea81c0a.2c7ec.fffff88c@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r84266:e31a3547aa90
Date: 2016-05-06 01:13 -0400
http://bitbucket.org/pypy/pypy/changeset/e31a3547aa90/
Log: Fix deque as I meant to do before. Added tests specifically testing
pickleability of deque iterators.
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -149,7 +149,7 @@
RegrTest('test_codecmaps_jp.py', usemodules='_multibytecodec'),
RegrTest('test_codecmaps_kr.py', usemodules='_multibytecodec'),
RegrTest('test_codecmaps_tw.py', usemodules='_multibytecodec'),
- RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'),
+ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec struct unicodedata array'),
RegrTest('test_codeop.py', core=True),
RegrTest('test_coding.py', core=True),
RegrTest('test_collections.py', usemodules='binascii struct'),
@@ -179,7 +179,7 @@
RegrTest('test_decimal.py'),
RegrTest('test_decorators.py', core=True),
RegrTest('test_defaultdict.py', usemodules='_collections'),
- RegrTest('test_deque.py', core=True, usemodules='_collections'),
+ RegrTest('test_deque.py', core=True, usemodules='_collections struct'),
RegrTest('test_descr.py', core=True, usemodules='_weakref'),
RegrTest('test_descrtut.py', core=True),
RegrTest('test_devpoll.py'),
diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py
--- a/pypy/module/_collections/interp_deque.py
+++ b/pypy/module/_collections/interp_deque.py
@@ -533,7 +533,16 @@
return self.space.newtuple([self.space.gettypefor(W_DequeIter),
self.space.newtuple([self.deque])])
+def W_DequeIter__new__(space, w_subtype, w_deque):
+ w_self = space.allocate_instance(W_DequeIter, w_subtype)
+ if not isinstance(w_deque, W_Deque):
+ raise oefmt(space.w_TypeError, "must be collections.deque, not %T")
+
+ W_DequeIter.__init__(space.interp_w(W_DequeIter, w_self), w_deque)
+ return w_self
+
W_DequeIter.typedef = TypeDef("_collections.deque_iterator",
+ __new__ = interp2app(W_DequeIter__new__),
__iter__ = interp2app(W_DequeIter.iter),
__length_hint__ = interp2app(W_DequeIter.length),
__next__ = interp2app(W_DequeIter.next),
@@ -576,10 +585,24 @@
self.index = ri
return w_x
+ def reduce(self):
+ return self.space.newtuple([self.space.gettypefor(W_DequeRevIter),
+ self.space.newtuple([self.deque])])
+
+def W_DequeRevIter__new__(space, w_subtype, w_deque):
+ w_self = space.allocate_instance(W_DequeRevIter, w_subtype)
+ if not isinstance(w_deque, W_Deque):
+ raise oefmt(space.w_TypeError, "must be collections.deque, not %T")
+
+ W_DequeRevIter.__init__(space.interp_w(W_DequeRevIter, w_self), w_deque)
+ return w_self
+
W_DequeRevIter.typedef = TypeDef("_collections.deque_reverse_iterator",
+ __new__ = interp2app(W_DequeRevIter__new__),
__iter__ = interp2app(W_DequeRevIter.iter),
__length_hint__ = interp2app(W_DequeRevIter.length),
__next__ = interp2app(W_DequeRevIter.next),
+ __reduce__ = interp2app(W_DequeRevIter.reduce)
)
W_DequeRevIter.typedef.acceptable_as_base_class = False
diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py
--- a/pypy/module/_collections/test/test_deque.py
+++ b/pypy/module/_collections/test/test_deque.py
@@ -1,6 +1,6 @@
class AppTestBasic:
- spaceconfig = dict(usemodules=['_collections'])
+ spaceconfig = dict(usemodules=['_collections', 'struct'])
def test_basics(self):
from _collections import deque
@@ -301,3 +301,19 @@
d.pop()
gc.collect(); gc.collect(); gc.collect()
assert X.freed
+
+ def test_DequeIter_pickle(self):
+ from _collections import deque
+ import pickle
+ d = deque([1,2,3])
+ iterator = iter(d)
+ copy = pickle.loads(pickle.dumps(iterator))
+ assert list(iterator) == list(copy)
+
+ def test_DequeRevIter_pickle(self):
+ from _collections import deque
+ import pickle
+ d = deque([1,2,3])
+ iterator = reversed(d)
+ copy = pickle.loads(pickle.dumps(iterator))
+ assert list(iterator) == list(copy)
From pypy.commits at gmail.com Fri May 6 11:14:40 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Fri, 06 May 2016 08:14:40 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Add astbuilder test @=,
remove mistake in pyopcode
Message-ID: <572cb4e0.2457c20a.f0ca6.2d5c@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r84269:07d590485a65
Date: 2016-05-06 17:13 +0200
http://bitbucket.org/pypy/pypy/changeset/07d590485a65/
Log: Add astbuilder test @=, remove mistake in pyopcode
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -625,6 +625,7 @@
("/=", ast.Div),
("//=", ast.FloorDiv),
("%=", ast.Mod),
+ ("@=", ast.MatMul),
("<<=", ast.LShift),
(">>=", ast.RShift),
("&=", ast.BitAnd),
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -592,7 +592,6 @@
INPLACE_MULTIPLY = binaryoperation("inplace_mul")
INPLACE_TRUE_DIVIDE = binaryoperation("inplace_truediv")
INPLACE_FLOOR_DIVIDE = binaryoperation("inplace_floordiv")
- INPLACE_FLOOR_DIVIDE = binaryoperation("inplace_matmul")
INPLACE_DIVIDE = binaryoperation("inplace_div")
# XXX INPLACE_DIVIDE must fall back to INPLACE_TRUE_DIVIDE with -Qnew
INPLACE_MODULO = binaryoperation("inplace_mod")
From pypy.commits at gmail.com Fri May 6 12:37:52 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 06 May 2016 09:37:52 -0700 (PDT)
Subject: [pypy-commit] pypy default: Remove the @specialize.arg_or_var. As
far as I can tell, inside pypy
Message-ID: <572cc860.ce9d1c0a.6763.2efd@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84270:3bfdbf0a6101
Date: 2016-05-06 18:18 +0200
http://bitbucket.org/pypy/pypy/changeset/3bfdbf0a6101/
Log: Remove the @specialize.arg_or_var. As far as I can tell, inside pypy
we never ever call any of these five functions with two different
constant arguments. Moreover for obscure reasons it crashes when we
translate pypy with -O0 --no-allworkingmodules...
diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py
--- a/rpython/rlib/runicode.py
+++ b/rpython/rlib/runicode.py
@@ -989,8 +989,6 @@
return result.build(), pos
-# Specialize on the errorhandler when it's a constant
- at specialize.arg_or_var(4)
def str_decode_ascii(s, size, errors, final=False,
errorhandler=None):
if errorhandler is None:
@@ -1020,8 +1018,6 @@
return result.build()
-# Specialize on the errorhandler when it's a constant
- at specialize.arg_or_var(3)
def unicode_encode_ucs1_helper(p, size, errors,
errorhandler=None, limit=256):
if errorhandler is None:
@@ -1064,12 +1060,10 @@
return result.build()
- at specialize.arg_or_var(3)
def unicode_encode_latin_1(p, size, errors, errorhandler=None):
res = unicode_encode_ucs1_helper(p, size, errors, errorhandler, 256)
return res
- at specialize.arg_or_var(3)
def unicode_encode_ascii(p, size, errors, errorhandler=None):
res = unicode_encode_ucs1_helper(p, size, errors, errorhandler, 128)
return res
@@ -1194,8 +1188,6 @@
builder.append(res)
return pos
-# Specialize on the errorhandler when it's a constant
- at specialize.arg_or_var(4)
def str_decode_unicode_escape(s, size, errors, final=False,
errorhandler=None,
unicodedata_handler=None):
From pypy.commits at gmail.com Fri May 6 20:09:44 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Fri, 06 May 2016 17:09:44 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: now an OSError on 3.3
Message-ID: <572d3248.01341c0a.82308.ffffb23d@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84271:adaec121cd45
Date: 2016-05-06 17:08 -0700
http://bitbucket.org/pypy/pypy/changeset/adaec121cd45/
Log: now an OSError on 3.3
diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py
--- a/pypy/module/signal/interp_signal.py
+++ b/pypy/module/signal/interp_signal.py
@@ -11,7 +11,7 @@
PeriodicAsyncAction)
from pypy.interpreter.gateway import unwrap_spec
-from rpython.rlib import jit, rposix, rgc
+from rpython.rlib import jit, rgc
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.rarithmetic import intmask, widen
from rpython.rlib.rsignal import *
@@ -260,8 +260,7 @@
def siginterrupt(space, signum, flag):
check_signum_in_range(space, signum)
if rffi.cast(lltype.Signed, c_siginterrupt(signum, flag)) < 0:
- errno = rposix.get_saved_errno()
- raise OperationError(space.w_RuntimeError, space.wrap(errno))
+ raise exception_from_saved_errno(space, space.w_OSError)
#__________________________________________________________
From pypy.commits at gmail.com Fri May 6 20:39:11 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Fri, 06 May 2016 17:39:11 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-macros-cast: Cast inside of macros so
that callers don't get compilation errors in PyPy.
Message-ID: <572d392f.10691c0a.62ac.ffffbe48@mx.google.com>
Author: Devin Jeanpierre
Branch: cpyext-macros-cast
Changeset: r84272:cf292feacdbb
Date: 2016-05-06 17:37 -0700
http://bitbucket.org/pypy/pypy/changeset/cf292feacdbb/
Log: Cast inside of macros so that callers don't get compilation errors
in PyPy.
CPython defines many macros like so:
#define PyWhatever_FOO(x) (((PyWhatever*)(x))->foo)
And callers can pass in a `void*`, a `PyWhatever*`, a `PyObject*`,
and it all works assuming that the dynamic type is correct for the
cast.
In PyPy, without these casts, a warning is emitted if you pass the
"wrong" type, even though it would work in CPython. This breaks
compatibility for projects that build with -Werror.
(This used to be many commits, but I ended up gluing them all
together because I am no good at mercurial. Original is mostly at
this bitbucket repo for now:
https://bitbucket.org/devin.jeanpierre/pypy )
diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py
--- a/pypy/module/cpyext/cdatetime.py
+++ b/pypy/module/cpyext/cdatetime.py
@@ -179,67 +179,67 @@
# Accessors
@cpython_api([PyDateTime_Date], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_GET_YEAR(space, w_obj):
+def _PyDateTime_GET_YEAR(space, w_obj):
"""Return the year, as a positive int.
"""
return space.int_w(space.getattr(w_obj, space.wrap("year")))
@cpython_api([PyDateTime_Date], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_GET_MONTH(space, w_obj):
+def _PyDateTime_GET_MONTH(space, w_obj):
"""Return the month, as an int from 1 through 12.
"""
return space.int_w(space.getattr(w_obj, space.wrap("month")))
@cpython_api([PyDateTime_Date], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_GET_DAY(space, w_obj):
+def _PyDateTime_GET_DAY(space, w_obj):
"""Return the day, as an int from 1 through 31.
"""
return space.int_w(space.getattr(w_obj, space.wrap("day")))
@cpython_api([PyDateTime_DateTime], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_DATE_GET_HOUR(space, w_obj):
+def _PyDateTime_DATE_GET_HOUR(space, w_obj):
"""Return the hour, as an int from 0 through 23.
"""
return space.int_w(space.getattr(w_obj, space.wrap("hour")))
@cpython_api([PyDateTime_DateTime], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_DATE_GET_MINUTE(space, w_obj):
+def _PyDateTime_DATE_GET_MINUTE(space, w_obj):
"""Return the minute, as an int from 0 through 59.
"""
return space.int_w(space.getattr(w_obj, space.wrap("minute")))
@cpython_api([PyDateTime_DateTime], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_DATE_GET_SECOND(space, w_obj):
+def _PyDateTime_DATE_GET_SECOND(space, w_obj):
"""Return the second, as an int from 0 through 59.
"""
return space.int_w(space.getattr(w_obj, space.wrap("second")))
@cpython_api([PyDateTime_DateTime], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_DATE_GET_MICROSECOND(space, w_obj):
+def _PyDateTime_DATE_GET_MICROSECOND(space, w_obj):
"""Return the microsecond, as an int from 0 through 999999.
"""
return space.int_w(space.getattr(w_obj, space.wrap("microsecond")))
@cpython_api([PyDateTime_Time], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_TIME_GET_HOUR(space, w_obj):
+def _PyDateTime_TIME_GET_HOUR(space, w_obj):
"""Return the hour, as an int from 0 through 23.
"""
return space.int_w(space.getattr(w_obj, space.wrap("hour")))
@cpython_api([PyDateTime_Time], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_TIME_GET_MINUTE(space, w_obj):
+def _PyDateTime_TIME_GET_MINUTE(space, w_obj):
"""Return the minute, as an int from 0 through 59.
"""
return space.int_w(space.getattr(w_obj, space.wrap("minute")))
@cpython_api([PyDateTime_Time], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_TIME_GET_SECOND(space, w_obj):
+def _PyDateTime_TIME_GET_SECOND(space, w_obj):
"""Return the second, as an int from 0 through 59.
"""
return space.int_w(space.getattr(w_obj, space.wrap("second")))
@cpython_api([PyDateTime_Time], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_TIME_GET_MICROSECOND(space, w_obj):
+def _PyDateTime_TIME_GET_MICROSECOND(space, w_obj):
"""Return the microsecond, as an int from 0 through 999999.
"""
return space.int_w(space.getattr(w_obj, space.wrap("microsecond")))
@@ -249,13 +249,13 @@
# for types defined in a python module like lib/datetime.py.
@cpython_api([PyDateTime_Delta], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_DELTA_GET_DAYS(space, w_obj):
+def _PyDateTime_DELTA_GET_DAYS(space, w_obj):
return space.int_w(space.getattr(w_obj, space.wrap("days")))
@cpython_api([PyDateTime_Delta], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_DELTA_GET_SECONDS(space, w_obj):
+def _PyDateTime_DELTA_GET_SECONDS(space, w_obj):
return space.int_w(space.getattr(w_obj, space.wrap("seconds")))
@cpython_api([PyDateTime_Delta], rffi.INT_real, error=CANNOT_FAIL)
-def PyDateTime_DELTA_GET_MICROSECONDS(space, w_obj):
+def _PyDateTime_DELTA_GET_MICROSECONDS(space, w_obj):
return space.int_w(space.getattr(w_obj, space.wrap("microseconds")))
diff --git a/pypy/module/cpyext/floatobject.py b/pypy/module/cpyext/floatobject.py
--- a/pypy/module/cpyext/floatobject.py
+++ b/pypy/module/cpyext/floatobject.py
@@ -49,7 +49,7 @@
return space.float_w(space.float(w_obj))
@cpython_api([PyObject], lltype.Float, error=CANNOT_FAIL)
-def PyFloat_AS_DOUBLE(space, w_float):
+def _PyFloat_AS_DOUBLE(space, w_float):
"""Return a C double representation of the contents of w_float, but
without error checking."""
return space.float_w(w_float)
diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h
--- a/pypy/module/cpyext/include/Python.h
+++ b/pypy/module/cpyext/include/Python.h
@@ -87,6 +87,7 @@
#include "pymath.h"
#include "pyport.h"
#include "warnings.h"
+#include "weakrefobject.h"
#include
#include
@@ -102,6 +103,7 @@
#include "funcobject.h"
#include "code.h"
+#include "abstract.h"
#include "modsupport.h"
#include "pythonrun.h"
#include "pyerrors.h"
@@ -129,6 +131,7 @@
#include "fileobject.h"
#include "pysignals.h"
#include "pythread.h"
+#include "setobject.h"
#include "traceback.h"
/* Missing definitions */
diff --git a/pypy/module/cpyext/include/abstract.h b/pypy/module/cpyext/include/abstract.h
--- a/pypy/module/cpyext/include/abstract.h
+++ b/pypy/module/cpyext/include/abstract.h
@@ -1,1 +1,3 @@
-/* empty */
+#define PySequence_Fast_GET_ITEM(seq, i) _PySequence_Fast_GET_ITEM((PyObject*)(seq), (i))
+#define PySequence_Fast_GET_SIZE(seq) _PySequence_Fast_GET_SIZE((PyObject*)(seq))
+#define PySequence_ITEM(seq, i) _PySequence_ITEM((PyObject*)(seq), (i))
diff --git a/pypy/module/cpyext/include/datetime.h b/pypy/module/cpyext/include/datetime.h
--- a/pypy/module/cpyext/include/datetime.h
+++ b/pypy/module/cpyext/include/datetime.h
@@ -4,6 +4,27 @@
extern "C" {
#endif
+
+#define PyDateTime_GET_YEAR(o) _PyDateTime_GET_YEAR((PyDateTime_Date*)(o))
+#define PyDateTime_GET_MONTH(o) _PyDateTime_GET_MONTH((PyDateTime_Date*)(o))
+#define PyDateTime_GET_DAY(o) _PyDateTime_GET_DAY((PyDateTime_Date*)(o))
+
+#define PyDateTime_DATE_GET_HOUR(o) _PyDateTime_DATE_GET_HOUR((PyDateTime_DateTime*)(o))
+#define PyDateTime_DATE_GET_MINUTE(o) _PyDateTime_DATE_GET_MINUTE((PyDateTime_DateTime*)(o))
+#define PyDateTime_DATE_GET_SECOND(o) _PyDateTime_DATE_GET_SECOND((PyDateTime_DateTime*)(o))
+#define PyDateTime_DATE_GET_MICROSECOND(o) _PyDateTime_DATE_GET_MICROSECOND((PyDateTime_DateTime*)(o))
+
+#define PyDateTime_TIME_GET_HOUR(o) _PyDateTime_TIME_GET_HOUR((PyDateTime_Time*)(o))
+#define PyDateTime_TIME_GET_MINUTE(o) _PyDateTime_TIME_GET_MINUTE((PyDateTime_Time*)(o))
+#define PyDateTime_TIME_GET_SECOND(o) _PyDateTime_TIME_GET_SECOND((PyDateTime_Time*)(o))
+#define PyDateTime_TIME_GET_MICROSECOND(o) _PyDateTime_TIME_GET_MICROSECOND((PyDateTime_Time*)(o))
+
+#define PyDateTime_DELTA_GET_DAYS(o) _PyDateTime_DELTA_GET_DAYS((PyDateTime_Delta*)(o))
+#define PyDateTime_DELTA_GET_SECONDS(o) _PyDateTime_DELTA_GET_SECONDS((PyDateTime_Delta*)(o))
+#define PyDateTime_DELTA_GET_MICROSECONDS(o) _PyDateTime_DELTA_GET_MICROSECONDS((PyDateTime_Delta*)(o))
+
+
+
/* Define structure for C API. */
typedef struct {
/* type objects */
diff --git a/pypy/module/cpyext/include/floatobject.h b/pypy/module/cpyext/include/floatobject.h
--- a/pypy/module/cpyext/include/floatobject.h
+++ b/pypy/module/cpyext/include/floatobject.h
@@ -19,6 +19,8 @@
double ob_fval;
} PyFloatObject;
+#define PyFloat_AS_DOUBLE(o) _PyFloat_AS_DOUBLE((PyObject*)(o))
+
#define PyFloat_STR_PRECISION 12
#ifdef Py_NAN
diff --git a/pypy/module/cpyext/include/intobject.h b/pypy/module/cpyext/include/intobject.h
--- a/pypy/module/cpyext/include/intobject.h
+++ b/pypy/module/cpyext/include/intobject.h
@@ -7,6 +7,8 @@
extern "C" {
#endif
+#define PyInt_AS_LONG(obj) _PyInt_AS_LONG((PyObject*)obj);
+
typedef struct {
PyObject_HEAD
long ob_ival;
diff --git a/pypy/module/cpyext/include/listobject.h b/pypy/module/cpyext/include/listobject.h
--- a/pypy/module/cpyext/include/listobject.h
+++ b/pypy/module/cpyext/include/listobject.h
@@ -1,1 +1,3 @@
-#define PyList_GET_ITEM PyList_GetItem
+#define PyList_GET_ITEM(o, i) PyList_GetItem((PyObject*)(o), (i))
+#define PyList_SET_ITEM(o, i, v) _PyList_SET_ITEM((PyObject*)(o), (i), (v))
+#define PyList_GET_SIZE(o) _PyList_GET_SIZE((PyObject*)(o))
diff --git a/pypy/module/cpyext/include/setobject.h b/pypy/module/cpyext/include/setobject.h
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/include/setobject.h
@@ -0,0 +1,14 @@
+/* Int object interface */
+
+#ifndef Py_SETOBJECT_H
+#define Py_SETOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PySet_GET_SIZE(obj) _PySet_GET_SIZE((PyObject*)obj);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_SETOBJECT_H */
diff --git a/pypy/module/cpyext/include/unicodeobject.h b/pypy/module/cpyext/include/unicodeobject.h
--- a/pypy/module/cpyext/include/unicodeobject.h
+++ b/pypy/module/cpyext/include/unicodeobject.h
@@ -5,6 +5,10 @@
extern "C" {
#endif
+#define PyUnicode_GET_SIZE(o) _PyUnicode_GET_SIZE((PyObject*)(o))
+#define PyUnicode_GET_DATA_SIZE(o) _PyUnicode_GET_DATA_SIZE((PyObject*)(o))
+#define PyUnicode_AS_UNICODE(o) _PyUnicode_AS_UNICODE((PyObject*)(o))
+
typedef unsigned int Py_UCS4;
#ifdef HAVE_USABLE_WCHAR_T
diff --git a/pypy/module/cpyext/include/weakrefobject.h b/pypy/module/cpyext/include/weakrefobject.h
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/include/weakrefobject.h
@@ -0,0 +1,1 @@
+#define PyWeakref_GET_OBJECT(o) PyWeakref_GetObject((PyObject*)(o))
diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py
--- a/pypy/module/cpyext/intobject.py
+++ b/pypy/module/cpyext/intobject.py
@@ -105,7 +105,7 @@
return num.ulonglongmask()
@cpython_api([PyObject], lltype.Signed, error=CANNOT_FAIL)
-def PyInt_AS_LONG(space, w_int):
+def _PyInt_AS_LONG(space, w_int):
"""Return the value of the object w_int. No error checking is performed."""
return space.int_w(w_int)
diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py
--- a/pypy/module/cpyext/listobject.py
+++ b/pypy/module/cpyext/listobject.py
@@ -23,7 +23,7 @@
@cpython_api([PyObject, Py_ssize_t, PyObject], PyObject, error=CANNOT_FAIL,
result_borrowed=True)
-def PyList_SET_ITEM(space, w_list, index, w_item):
+def _PyList_SET_ITEM(space, w_list, index, w_item):
"""Macro form of PyList_SetItem() without error checking. This is normally
only used to fill in new lists where there is no previous content.
@@ -88,7 +88,7 @@
return 0
@cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
-def PyList_GET_SIZE(space, w_list):
+def _PyList_GET_SIZE(space, w_list):
"""Macro form of PyList_Size() without error checking.
"""
assert isinstance(w_list, W_ListObject)
@@ -102,7 +102,7 @@
"""
if not PyList_Check(space, ref):
raise oefmt(space.w_TypeError, "expected list object")
- return PyList_GET_SIZE(space, ref)
+ return _PyList_GET_SIZE(space, ref)
@cpython_api([PyObject], PyObject)
def PyList_AsTuple(space, w_list):
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -46,7 +46,7 @@
members of the result. Returns NULL on failure. If the object is not a
sequence, raises TypeError with m as the message text."""
if isinstance(w_obj, W_ListObject):
- # make sure we can return a borrowed obj from PySequence_Fast_GET_ITEM
+ # make sure we can return a borrowed obj from _PySequence_Fast_GET_ITEM
w_obj.convert_to_cpy_strategy(space)
return w_obj
try:
@@ -55,7 +55,7 @@
raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m)))
@cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True)
-def PySequence_Fast_GET_ITEM(space, w_obj, index):
+def _PySequence_Fast_GET_ITEM(space, w_obj, index):
"""Return the ith element of o, assuming that o was returned by
PySequence_Fast(), o is not NULL, and that i is within bounds.
"""
@@ -68,7 +68,7 @@
"sequence")
@cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
-def PySequence_Fast_GET_SIZE(space, w_obj):
+def _PySequence_Fast_GET_SIZE(space, w_obj):
"""Returns the length of o, assuming that o was returned by
PySequence_Fast() and that o is not NULL. The size can also be
gotten by calling PySequence_Size() on o, but
@@ -120,7 +120,7 @@
return 0
@cpython_api([PyObject, Py_ssize_t], PyObject)
-def PySequence_ITEM(space, w_obj, i):
+def _PySequence_ITEM(space, w_obj, i):
"""Return the ith element of o or NULL on failure. Macro form of
PySequence_GetItem() but without checking that
PySequence_Check(o)() is true and without adjustment for negative
@@ -134,7 +134,7 @@
def PySequence_GetItem(space, w_obj, i):
"""Return the ith element of o, or NULL on failure. This is the equivalent of
the Python expression o[i]."""
- return PySequence_ITEM(space, w_obj, i)
+ return _PySequence_ITEM(space, w_obj, i)
@cpython_api([PyObject], PyObject)
def PySequence_List(space, w_obj):
diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py
--- a/pypy/module/cpyext/setobject.py
+++ b/pypy/module/cpyext/setobject.py
@@ -75,7 +75,7 @@
return 0
@cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
-def PySet_GET_SIZE(space, w_s):
+def _PySet_GET_SIZE(space, w_s):
"""Macro form of PySet_Size() without error checking."""
return space.int_w(space.len(w_s))
@@ -86,7 +86,7 @@
or an instance of a subtype."""
if not PySet_Check(space, ref):
raise oefmt(space.w_TypeError, "expected set object")
- return PySet_GET_SIZE(space, ref)
+ return _PySet_GET_SIZE(space, ref)
@cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)
def PySet_Contains(space, w_obj, w_key):
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -288,6 +288,24 @@
# This does not test much, but at least the refcounts are checked.
assert module.test_intern_inplace('s') == 's'
+ def test_bytes_macros(self):
+ """The PyString_* macros cast, and calls expecting that build."""
+ module = self.import_extension('foo', [
+ ("test_macro_invocations", "METH_NOARGS",
+ """
+ PyObject* o = PyString_FromString("");
+ PyStringObject* u = (PyStringObject*)o;
+
+ PyString_GET_SIZE(u);
+ PyString_GET_SIZE(o);
+
+ PyString_AS_STRING(o);
+ PyString_AS_STRING(u);
+
+ return o;
+ """)])
+ assert module.test_macro_invocations() == ''
+
def test_hash_and_state(self):
module = self.import_extension('foo', [
("test_hash", "METH_VARARGS",
diff --git a/pypy/module/cpyext/test/test_datetime.py b/pypy/module/cpyext/test/test_datetime.py
--- a/pypy/module/cpyext/test/test_datetime.py
+++ b/pypy/module/cpyext/test/test_datetime.py
@@ -10,9 +10,9 @@
assert api.PyDate_Check(w_date)
assert api.PyDate_CheckExact(w_date)
- assert api.PyDateTime_GET_YEAR(w_date) == 2010
- assert api.PyDateTime_GET_MONTH(w_date) == 6
- assert api.PyDateTime_GET_DAY(w_date) == 3
+ assert api._PyDateTime_GET_YEAR(w_date) == 2010
+ assert api._PyDateTime_GET_MONTH(w_date) == 6
+ assert api._PyDateTime_GET_DAY(w_date) == 3
def test_time(self, space, api):
w_time = api.PyTime_FromTime(23, 15, 40, 123456)
@@ -21,10 +21,10 @@
assert api.PyTime_Check(w_time)
assert api.PyTime_CheckExact(w_time)
- assert api.PyDateTime_TIME_GET_HOUR(w_time) == 23
- assert api.PyDateTime_TIME_GET_MINUTE(w_time) == 15
- assert api.PyDateTime_TIME_GET_SECOND(w_time) == 40
- assert api.PyDateTime_TIME_GET_MICROSECOND(w_time) == 123456
+ assert api._PyDateTime_TIME_GET_HOUR(w_time) == 23
+ assert api._PyDateTime_TIME_GET_MINUTE(w_time) == 15
+ assert api._PyDateTime_TIME_GET_SECOND(w_time) == 40
+ assert api._PyDateTime_TIME_GET_MICROSECOND(w_time) == 123456
def test_datetime(self, space, api):
w_date = api.PyDateTime_FromDateAndTime(
@@ -36,13 +36,13 @@
assert api.PyDate_Check(w_date)
assert not api.PyDate_CheckExact(w_date)
- assert api.PyDateTime_GET_YEAR(w_date) == 2010
- assert api.PyDateTime_GET_MONTH(w_date) == 6
- assert api.PyDateTime_GET_DAY(w_date) == 3
- assert api.PyDateTime_DATE_GET_HOUR(w_date) == 23
- assert api.PyDateTime_DATE_GET_MINUTE(w_date) == 15
- assert api.PyDateTime_DATE_GET_SECOND(w_date) == 40
- assert api.PyDateTime_DATE_GET_MICROSECOND(w_date) == 123456
+ assert api._PyDateTime_GET_YEAR(w_date) == 2010
+ assert api._PyDateTime_GET_MONTH(w_date) == 6
+ assert api._PyDateTime_GET_DAY(w_date) == 3
+ assert api._PyDateTime_DATE_GET_HOUR(w_date) == 23
+ assert api._PyDateTime_DATE_GET_MINUTE(w_date) == 15
+ assert api._PyDateTime_DATE_GET_SECOND(w_date) == 40
+ assert api._PyDateTime_DATE_GET_MICROSECOND(w_date) == 123456
def test_delta(self, space, api):
w_delta = space.appexec(
@@ -57,9 +57,9 @@
assert api.PyDelta_Check(w_delta)
assert api.PyDelta_CheckExact(w_delta)
- assert api.PyDateTime_DELTA_GET_DAYS(w_delta) == 10
- assert api.PyDateTime_DELTA_GET_SECONDS(w_delta) == 20
- assert api.PyDateTime_DELTA_GET_MICROSECONDS(w_delta) == 30
+ assert api._PyDateTime_DELTA_GET_DAYS(w_delta) == 10
+ assert api._PyDateTime_DELTA_GET_SECONDS(w_delta) == 20
+ assert api._PyDateTime_DELTA_GET_MICROSECONDS(w_delta) == 30
def test_fromtimestamp(self, space, api):
w_args = space.wrap((0,))
@@ -117,3 +117,106 @@
datetime.timedelta,
datetime.tzinfo)
module.clear_types()
+
+ def test_macros(self):
+ module = self.import_extension('foo', [
+ ("test_date_macros", "METH_NOARGS",
+ """
+ PyDateTime_IMPORT;
+ if (!PyDateTimeAPI) {
+ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI");
+ return NULL;
+ }
+ PyObject* obj = PyDate_FromDate(2000, 6, 6);
+ PyDateTime_Date* d = (PyDateTime_Date*)obj;
+
+ PyDateTime_GET_YEAR(obj);
+ PyDateTime_GET_YEAR(d);
+
+ PyDateTime_GET_MONTH(obj);
+ PyDateTime_GET_MONTH(d);
+
+ PyDateTime_GET_DAY(obj);
+ PyDateTime_GET_DAY(d);
+
+ return obj;
+ """),
+ ("test_datetime_macros", "METH_NOARGS",
+ """
+ PyDateTime_IMPORT;
+ if (!PyDateTimeAPI) {
+ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI");
+ return NULL;
+ }
+ PyObject* obj = PyDateTime_FromDateAndTime(2000, 6, 6, 6, 6, 6, 6);
+ PyDateTime_DateTime* dt = (PyDateTime_DateTime*)obj;
+
+ PyDateTime_GET_YEAR(obj);
+ PyDateTime_GET_YEAR(dt);
+
+ PyDateTime_GET_MONTH(obj);
+ PyDateTime_GET_MONTH(dt);
+
+ PyDateTime_GET_DAY(obj);
+ PyDateTime_GET_DAY(dt);
+
+ PyDateTime_DATE_GET_HOUR(obj);
+ PyDateTime_DATE_GET_HOUR(dt);
+
+ PyDateTime_DATE_GET_MINUTE(obj);
+ PyDateTime_DATE_GET_MINUTE(dt);
+
+ PyDateTime_DATE_GET_SECOND(obj);
+ PyDateTime_DATE_GET_SECOND(dt);
+
+ PyDateTime_DATE_GET_MICROSECOND(obj);
+ PyDateTime_DATE_GET_MICROSECOND(dt);
+
+ return obj;
+ """),
+ ("test_time_macros", "METH_NOARGS",
+ """
+ PyDateTime_IMPORT;
+ if (!PyDateTimeAPI) {
+ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI");
+ return NULL;
+ }
+ PyObject* obj = PyTime_FromTime(6, 6, 6, 6);
+ PyDateTime_Time* t = (PyDateTime_Time*)obj;
+
+ PyDateTime_TIME_GET_HOUR(obj);
+ PyDateTime_TIME_GET_HOUR(t);
+
+ PyDateTime_TIME_GET_MINUTE(obj);
+ PyDateTime_TIME_GET_MINUTE(t);
+
+ PyDateTime_TIME_GET_SECOND(obj);
+ PyDateTime_TIME_GET_SECOND(t);
+
+ PyDateTime_TIME_GET_MICROSECOND(obj);
+ PyDateTime_TIME_GET_MICROSECOND(t);
+
+ return obj;
+ """),
+ ("test_delta_macros", "METH_NOARGS",
+ """
+ PyDateTime_IMPORT;
+ if (!PyDateTimeAPI) {
+ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI");
+ return NULL;
+ }
+ PyObject* obj = PyDelta_FromDSU(6, 6, 6);
+ PyDateTime_Delta* delta = (PyDateTime_Delta*)obj;
+
+ PyDateTime_DELTA_GET_DAYS(obj);
+ PyDateTime_DELTA_GET_DAYS(delta);
+
+ PyDateTime_DELTA_GET_SECONDS(obj);
+ PyDateTime_DELTA_GET_SECONDS(delta);
+
+ PyDateTime_DELTA_GET_MICROSECONDS(obj);
+ PyDateTime_DELTA_GET_MICROSECONDS(delta);
+
+ return obj;
+ """),
+ ])
diff --git a/pypy/module/cpyext/test/test_floatobject.py b/pypy/module/cpyext/test/test_floatobject.py
--- a/pypy/module/cpyext/test/test_floatobject.py
+++ b/pypy/module/cpyext/test/test_floatobject.py
@@ -6,7 +6,7 @@
def test_floatobject(self, space, api):
assert space.unwrap(api.PyFloat_FromDouble(3.14)) == 3.14
assert api.PyFloat_AsDouble(space.wrap(23.45)) == 23.45
- assert api.PyFloat_AS_DOUBLE(space.wrap(23.45)) == 23.45
+ assert api._PyFloat_AS_DOUBLE(space.wrap(23.45)) == 23.45
assert api.PyFloat_AsDouble(space.w_None) == -1
api.PyErr_Clear()
@@ -77,3 +77,19 @@
neginf = module.return_neginf()
assert neginf < 0
assert math.isinf(neginf)
+
+ def test_macro_accepts_wrong_pointer_type(self):
+ import math
+
+ module = self.import_extension('foo', [
+ ("test_macros", "METH_NOARGS",
+ """
+ PyObject* o = PyFloat_FromDouble(1.0);
+ // no PyFloatObject
+ char* dumb_pointer = (char*)o;
+
+ PyFloat_AS_DOUBLE(o);
+ PyFloat_AS_DOUBLE(dumb_pointer);
+
+ Py_RETURN_NONE;"""),
+ ])
diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py
--- a/pypy/module/cpyext/test/test_intobject.py
+++ b/pypy/module/cpyext/test/test_intobject.py
@@ -9,7 +9,7 @@
assert not api.PyInt_Check(space.wrap((1, 2, 3)))
for i in [3, -5, -1, -sys.maxint, sys.maxint - 1]:
x = api.PyInt_AsLong(space.wrap(i))
- y = api.PyInt_AS_LONG(space.wrap(i))
+ y = api._PyInt_AS_LONG(space.wrap(i))
assert x == i
assert y == i
w_x = api.PyInt_FromLong(x + 1)
@@ -191,3 +191,17 @@
i = mod.test_int()
assert isinstance(i, int)
assert i == 42
+
+ def test_int_macros(self):
+ mod = self.import_extension('foo', [
+ ("test_macros", "METH_NOARGS",
+ """
+ PyObject * obj = PyInt_FromLong(42);
+ PyIntObject * i = (PyIntObject*)obj;
+ PyInt_AS_LONG(obj);
+ PyInt_AS_LONG(i);
+ Py_RETURN_NONE;
+ """
+ ),
+ ])
+
diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py
--- a/pypy/module/cpyext/test/test_listobject.py
+++ b/pypy/module/cpyext/test/test_listobject.py
@@ -22,9 +22,9 @@
def test_get_size(self, space, api):
l = api.PyList_New(0)
- assert api.PyList_GET_SIZE(l) == 0
+ assert api._PyList_GET_SIZE(l) == 0
api.PyList_Append(l, space.wrap(3))
- assert api.PyList_GET_SIZE(l) == 1
+ assert api._PyList_GET_SIZE(l) == 1
def test_size(self, space, api):
l = space.newlist([space.w_None, space.w_None])
@@ -137,6 +137,33 @@
module.setlistitem(l,0)
assert l == [None, 2, 3]
+ def test_list_macros(self):
+ """The PyList_* macros cast, and calls expecting that build."""
+ module = self.import_extension('foo', [
+ ("test_macro_invocations", "METH_NOARGS",
+ """
+ PyObject* o = PyList_New(2);
+ PyListObject* l = (PyListObject*)o;
+
+
+ Py_INCREF(o);
+ PyList_SET_ITEM(o, 0, o);
+ Py_INCREF(o);
+ PyList_SET_ITEM(l, 1, o);
+
+ PyList_GET_ITEM(o, 0);
+ PyList_GET_ITEM(l, 1);
+
+ PyList_GET_SIZE(o);
+ PyList_GET_SIZE(l);
+
+ return o;
+ """
+ )
+ ])
+ x = module.test_macro_invocations()
+ assert x[0] is x[1] is x
+
def test_get_item_macro(self):
module = self.import_extension('foo', [
("test_get_item", "METH_NOARGS",
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py
--- a/pypy/module/cpyext/test/test_sequence.py
+++ b/pypy/module/cpyext/test/test_sequence.py
@@ -14,8 +14,8 @@
w_l = space.wrap([1, 2, 3, 4])
assert api.PySequence_Fast(w_l, "message") is w_l
- assert space.int_w(api.PySequence_Fast_GET_ITEM(w_l, 1)) == 2
- assert api.PySequence_Fast_GET_SIZE(w_l) == 4
+ assert space.int_w(api._PySequence_Fast_GET_ITEM(w_l, 1)) == 2
+ assert api._PySequence_Fast_GET_SIZE(w_l) == 4
w_set = space.wrap(set((1, 2, 3, 4)))
w_seq = api.PySequence_Fast(w_set, "message")
@@ -130,7 +130,7 @@
result = api.PySequence_GetItem(w_l, 4)
assert space.is_true(space.eq(result, space.wrap(4)))
- result = api.PySequence_ITEM(w_l, 4)
+ result = api._PySequence_ITEM(w_l, 4)
assert space.is_true(space.eq(result, space.wrap(4)))
self.raises(space, api, IndexError, api.PySequence_GetItem, w_l, 9000)
@@ -155,6 +155,28 @@
result = api.PySequence_Index(w_gen, w_tofind)
assert result == 4
+class AppTestSetObject(AppTestCpythonExtensionBase):
+ def test_sequence_macro_cast(self):
+ module = self.import_extension('foo', [
+ ("test_macro_cast", "METH_NOARGS",
+ """
+ PyObject* o = PyList_New(0);
+ PyList_Append(o, o);
+ PyListObject* l = (PyListObject*)o;
+
+ PySequence_Fast_GET_ITEM(o, 0);
+ PySequence_Fast_GET_ITEM(l, 0);
+
+ PySequence_Fast_GET_SIZE(o);
+ PySequence_Fast_GET_SIZE(l);
+
+ PySequence_ITEM(o, 0);
+ PySequence_ITEM(l, 0);
+
+ return o;
+ """
+ )
+ ])
class TestCPyListStrategy(BaseApiTest):
def test_getitem_setitem(self, space, api):
w_l = space.wrap([1, 2, 3, 4])
diff --git a/pypy/module/cpyext/test/test_setobject.py b/pypy/module/cpyext/test/test_setobject.py
--- a/pypy/module/cpyext/test/test_setobject.py
+++ b/pypy/module/cpyext/test/test_setobject.py
@@ -2,6 +2,7 @@
from pypy.module.cpyext.pyobject import PyObject, PyObjectP, make_ref, from_ref
from pypy.module.cpyext.test.test_api import BaseApiTest
+from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from rpython.rtyper.lltypesystem import rffi, lltype
@@ -13,7 +14,7 @@
w_set = space.call_function(space.w_set)
space.call_method(w_set, 'update', space.wrap([1,2,3,4]))
assert api.PySet_Size(w_set) == 4
- assert api.PySet_GET_SIZE(w_set) == 4
+ assert api._PySet_GET_SIZE(w_set) == 4
raises(TypeError, api.PySet_Size(space.newlist([])))
api.PyErr_Clear()
@@ -45,3 +46,20 @@
w_frozenset = space.newfrozenset([space.wrap(i) for i in [1, 2, 3, 4]])
assert api.PyAnySet_CheckExact(w_set)
assert api.PyAnySet_CheckExact(w_frozenset)
+
+class AppTestSetObject(AppTestCpythonExtensionBase):
+ def test_set_macro_cast(self):
+ module = self.import_extension('foo', [
+ ("test_macro_cast", "METH_NOARGS",
+ """
+ PyObject* o = PySet_New(NULL);
+ // no PySetObject
+ char* dumb_pointer = (char*) o;
+
+ PySet_GET_SIZE(o);
+ PySet_GET_SIZE(dumb_pointer);
+
+ return o;
+ """
+ )
+ ])
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -111,12 +111,32 @@
assert isinstance(res, str)
assert res == 'caf?'
+ def test_unicode_macros(self):
+ """The PyUnicode_* macros cast, and calls expecting that build."""
+ module = self.import_extension('foo', [
+ ("test_macro_invocations", "METH_NOARGS",
+ """
+ PyObject* o = PyUnicode_FromString("");
+ PyUnicodeObject* u = (PyUnicodeObject*)o;
+
+ PyUnicode_GET_SIZE(u);
+ PyUnicode_GET_SIZE(o);
+
+ PyUnicode_GET_DATA_SIZE(u);
+ PyUnicode_GET_DATA_SIZE(o);
+
+ PyUnicode_AS_UNICODE(o);
+ PyUnicode_AS_UNICODE(u);
+ return o;
+ """)])
+ assert module.test_macro_invocations() == u''
+
class TestUnicode(BaseApiTest):
def test_unicodeobject(self, space, api):
- assert api.PyUnicode_GET_SIZE(space.wrap(u'späm')) == 4
+ assert api._PyUnicode_GET_SIZE(space.wrap(u'späm')) == 4
assert api.PyUnicode_GetSize(space.wrap(u'späm')) == 4
unichar = rffi.sizeof(Py_UNICODE)
- assert api.PyUnicode_GET_DATA_SIZE(space.wrap(u'späm')) == 4 * unichar
+ assert api._PyUnicode_GET_DATA_SIZE(space.wrap(u'späm')) == 4 * unichar
encoding = rffi.charp2str(api.PyUnicode_GetDefaultEncoding())
w_default_encoding = space.call_function(
@@ -140,7 +160,7 @@
def test_AS(self, space, api):
word = space.wrap(u'spam')
array = rffi.cast(rffi.CWCHARP, api.PyUnicode_AS_DATA(word))
- array2 = api.PyUnicode_AS_UNICODE(word)
+ array2 = api._PyUnicode_AS_UNICODE(word)
array3 = api.PyUnicode_AsUnicode(word)
for (i, char) in enumerate(space.unwrap(word)):
assert array[i] == char
@@ -478,13 +498,13 @@
count1 = space.int_w(space.len(w_x))
target_chunk = lltype.malloc(rffi.CWCHARP.TO, count1, flavor='raw')
- x_chunk = api.PyUnicode_AS_UNICODE(w_x)
+ x_chunk = api._PyUnicode_AS_UNICODE(w_x)
api.Py_UNICODE_COPY(target_chunk, x_chunk, 4)
w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, 4))
assert space.eq_w(w_y, space.wrap(u"abcd"))
- size = api.PyUnicode_GET_SIZE(w_x)
+ size = api._PyUnicode_GET_SIZE(w_x)
api.Py_UNICODE_COPY(target_chunk, x_chunk, size)
w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, size))
diff --git a/pypy/module/cpyext/test/test_weakref.py b/pypy/module/cpyext/test/test_weakref.py
--- a/pypy/module/cpyext/test/test_weakref.py
+++ b/pypy/module/cpyext/test/test_weakref.py
@@ -7,7 +7,6 @@
w_ref = api.PyWeakref_NewRef(w_obj, space.w_None)
assert w_ref is not None
assert space.is_w(api.PyWeakref_GetObject(w_ref), w_obj)
- assert space.is_w(api.PyWeakref_GET_OBJECT(w_ref), w_obj)
assert space.is_w(api.PyWeakref_LockObject(w_ref), w_obj)
w_obj = space.newtuple([])
@@ -34,3 +33,25 @@
del w_obj
import gc; gc.collect()
assert space.is_w(api.PyWeakref_LockObject(w_ref), space.w_None)
+
+
+class AppTestWeakReference(AppTestCpythonExtensionBase):
+
+ def test_weakref_macro(self):
+ module = self.import_extension('foo', [
+ ("test_macro_cast", "METH_NOARGS",
+ """
+ // PyExc_Warning is some weak-reffable PyObject*.
+ PyObject* weakref_obj = PyWeakref_NewRef(PyExc_Warning, NULL);
+ if (!weakref_obj) return weakref_obj;
+ // No public PyWeakReference type.
+ char* dumb_pointer = (char*) weakref_obj;
+
+ PyWeakref_GET_OBJECT(weakref_obj);
+ PyWeakref_GET_OBJECT(dumb_pointer);
+
+ return weakref_obj;
+ """
+ )
+ ])
+ module.test_macro_cast()
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -192,23 +192,23 @@
def PyUnicode_AS_DATA(space, ref):
"""Return a pointer to the internal buffer of the object. o has to be a
PyUnicodeObject (not checked)."""
- return rffi.cast(rffi.CCHARP, PyUnicode_AS_UNICODE(space, ref))
+ return rffi.cast(rffi.CCHARP, _PyUnicode_AS_UNICODE(space, ref))
@cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
-def PyUnicode_GET_DATA_SIZE(space, w_obj):
+def _PyUnicode_GET_DATA_SIZE(space, w_obj):
"""Return the size of the object's internal buffer in bytes. o has to be a
PyUnicodeObject (not checked)."""
- return rffi.sizeof(lltype.UniChar) * PyUnicode_GET_SIZE(space, w_obj)
+ return rffi.sizeof(lltype.UniChar) * _PyUnicode_GET_SIZE(space, w_obj)
@cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
-def PyUnicode_GET_SIZE(space, w_obj):
+def _PyUnicode_GET_SIZE(space, w_obj):
"""Return the size of the object. o has to be a PyUnicodeObject (not
checked)."""
assert isinstance(w_obj, unicodeobject.W_UnicodeObject)
return space.len_w(w_obj)
@cpython_api([PyObject], rffi.CWCHARP, error=CANNOT_FAIL)
-def PyUnicode_AS_UNICODE(space, ref):
+def _PyUnicode_AS_UNICODE(space, ref):
"""Return a pointer to the internal Py_UNICODE buffer of the object. ref
has to be a PyUnicodeObject (not checked)."""
ref_unicode = rffi.cast(PyUnicodeObject, ref)
@@ -227,7 +227,7 @@
w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type))
if not space.is_true(space.issubtype(w_type, space.w_unicode)):
raise oefmt(space.w_TypeError, "expected unicode object")
- return PyUnicode_AS_UNICODE(space, ref)
+ return _PyUnicode_AS_UNICODE(space, ref)
@cpython_api([PyObject], Py_ssize_t, error=-1)
def PyUnicode_GetSize(space, ref):
@@ -247,7 +247,7 @@
string may or may not be 0-terminated. It is the responsibility of the caller
to make sure that the wchar_t string is 0-terminated in case this is
required by the application."""
- c_str = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref))
+ c_str = _PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref))
c_length = ref.c_length
# If possible, try to copy the 0-termination as well
diff --git a/pypy/module/cpyext/weakrefobject.py b/pypy/module/cpyext/weakrefobject.py
--- a/pypy/module/cpyext/weakrefobject.py
+++ b/pypy/module/cpyext/weakrefobject.py
@@ -37,13 +37,6 @@
"""
return space.call_function(w_ref) # borrowed ref
- at cpython_api([PyObject], PyObject, result_borrowed=True)
-def PyWeakref_GET_OBJECT(space, w_ref):
- """Similar to PyWeakref_GetObject(), but implemented as a macro that does no
- error checking.
- """
- return space.call_function(w_ref) # borrowed ref
-
@cpython_api([PyObject], PyObject)
def PyWeakref_LockObject(space, w_ref):
"""Return the referenced object from a weak reference. If the referent is
From pypy.commits at gmail.com Fri May 6 20:54:33 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Fri, 06 May 2016 17:54:33 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: __qualname__ must be removed from the type
dict so it doesn't propagate down to
Message-ID: <572d3cc9.a272c20a.dbfb.ffffe27c@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84273:2b6f54d085d2
Date: 2016-05-06 17:53 -0700
http://bitbucket.org/pypy/pypy/changeset/2b6f54d085d2/
Log: __qualname__ must be removed from the type dict so it doesn't
propagate down to the instance
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -57,8 +57,6 @@
assert a.__dict__ != b.__dict__
assert a.__dict__ != {'123': '456'}
assert {'123': '456'} != a.__dict__
- b.__dict__.pop('__qualname__')
- c.__dict__.pop('__qualname__')
assert b.__dict__ == c.__dict__
def test_str_repr(self):
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -1103,8 +1103,9 @@
layout = create_all_slots(w_self, hasoldstylebase, w_bestbase,
force_new_layout)
- if '__qualname__' in w_self.dict_w:
- w_self.qualname = w_self.space.unicode_w(w_self.dict_w['__qualname__'])
+ w_qualname = w_self.dict_w.pop('__qualname__', None)
+ if w_qualname is not None:
+ w_self.qualname = w_self.space.unicode_w(w_qualname)
ensure_common_attributes(w_self)
return layout
From pypy.commits at gmail.com Sat May 7 03:18:44 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 00:18:44 -0700 (PDT)
Subject: [pypy-commit] cffi default: Add blurb about pyenv and ucs2/ucs4 in
general
Message-ID: <572d96d4.634fc20a.fbf4e.2613@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2686:381ffc5c8a6b
Date: 2016-05-07 09:19 +0200
http://bitbucket.org/cffi/cffi/changeset/381ffc5c8a6b/
Log: Add blurb about pyenv and ucs2/ucs4 in general
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
--- a/doc/source/installation.rst
+++ b/doc/source/installation.rst
@@ -161,3 +161,21 @@
.. _`issue 9`: https://bitbucket.org/cffi/cffi/issue/9
.. _`Python issue 7546`: http://bugs.python.org/issue7546
+
+
+Linux and OS/X: UCS2 versus UCS4
+++++++++++++++++++++++++++++++++
+
+This is about getting an error like ``Symbol not found:
+_PyUnicodeUCS2_AsASCIIString``. This error occurs in Python 2 as soon
+as you mix "ucs2" and "ucs4" builds of Python.
+
+If you are using ``pyenv``, then see
+https://github.com/yyuu/pyenv/issues/257.
+
+Otherwise, you can download the sources of CFFI (instead of a prebuilt
+binary) and make sure that you build it with the same version of Python
+that will use it. For example, if you use ``virtualenv ~/venv``, then
+``. ~/venv/bin/activate``, then you are sure that running ``python
+setup.py install`` inside a copy of the sources of CFFI will build CFFI
+using exactly the version of Python from this virtualenv.
From pypy.commits at gmail.com Sat May 7 03:21:57 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 00:21:57 -0700 (PDT)
Subject: [pypy-commit] cffi default: Fix the next version number (not for
immediate release!)
Message-ID: <572d9795.89cbc20a.44a3.2067@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2687:ab0396f739ff
Date: 2016-05-07 09:22 +0200
http://bitbucket.org/cffi/cffi/changeset/ab0396f739ff/
Log: Fix the next version number (not for immediate release!)
diff --git a/doc/source/ref.rst b/doc/source/ref.rst
--- a/doc/source/ref.rst
+++ b/doc/source/ref.rst
@@ -321,7 +321,7 @@
**ffi.gc(ptr, None)**: removes the ownership on a object returned by a
regular call to ``ffi.gc``, and no destructor will be called when it
is garbage-collected. The object is modified in-place, and the
-function returns ``None``.
+function returns ``None``. *New in version 1.7: ffi.gc(ptr, None)*
Note that this should be avoided for large memory allocations or
for limited resources. This is particularly true on PyPy: its GC does
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst
--- a/doc/source/whatsnew.rst
+++ b/doc/source/whatsnew.rst
@@ -3,8 +3,10 @@
======================
-v1.next
-=======
+v1.7
+====
+
+(NOT RELEASED YET)
* ``ffi.gc(p, None)`` removes the destructor on an object previously
created by another call to ``ffi.gc()``
From pypy.commits at gmail.com Sat May 7 03:34:16 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 00:34:16 -0700 (PDT)
Subject: [pypy-commit] cffi default: expand
Message-ID: <572d9a78.21f9c20a.d72fa.36d1@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2688:f50574a39ee8
Date: 2016-05-07 09:34 +0200
http://bitbucket.org/cffi/cffi/changeset/f50574a39ee8/
Log: expand
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
--- a/doc/source/installation.rst
+++ b/doc/source/installation.rst
@@ -166,16 +166,28 @@
Linux and OS/X: UCS2 versus UCS4
++++++++++++++++++++++++++++++++
-This is about getting an error like ``Symbol not found:
-_PyUnicodeUCS2_AsASCIIString``. This error occurs in Python 2 as soon
-as you mix "ucs2" and "ucs4" builds of Python.
+This is about getting an ImportError about ``_cffi_backend.so`` with a
+message like ``Symbol not found: _PyUnicodeUCS2_AsASCIIString``. This
+error occurs in Python 2 as soon as you mix "ucs2" and "ucs4" builds of
+Python. It means that you are now running a Python compiled with
+"ucs4", but the extension module ``_cffi_backend.so`` was compiled by a
+different Python: one that was running "ucs2". (If the opposite problem
+occurs, you get an error about ``_PyUnicodeUCS4_AsASCIIString``
+instead.)
If you are using ``pyenv``, then see
https://github.com/yyuu/pyenv/issues/257.
-Otherwise, you can download the sources of CFFI (instead of a prebuilt
-binary) and make sure that you build it with the same version of Python
-that will use it. For example, if you use ``virtualenv ~/venv``, then
-``. ~/venv/bin/activate``, then you are sure that running ``python
-setup.py install`` inside a copy of the sources of CFFI will build CFFI
-using exactly the version of Python from this virtualenv.
+More generally, the solution that should always work is to download the
+sources of CFFI (instead of a prebuilt binary) and make sure that you
+build it with the same version of Python than the one that will use it.
+For example, with virtualenv:
+
+* ``virtualenv ~/venv``
+
+* ``cd ~/path/to/sources/of/cffi``
+
+* ``~/venv/bin/python setup.py install``
+
+This will compile and install CFFI in this virtualenv, using the
+Python from this virtualenv.
From pypy.commits at gmail.com Sat May 7 03:38:58 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 00:38:58 -0700 (PDT)
Subject: [pypy-commit] cffi default: just to make sure
Message-ID: <572d9b92.50301c0a.f1d1a.0fe7@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2689:ded7c0d8c44a
Date: 2016-05-07 09:39 +0200
http://bitbucket.org/cffi/cffi/changeset/ded7c0d8c44a/
Log: just to make sure
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
--- a/doc/source/installation.rst
+++ b/doc/source/installation.rst
@@ -187,6 +187,9 @@
* ``cd ~/path/to/sources/of/cffi``
+* ``~/venv/bin/python setup.py build --force`` # forcing a rebuild to
+ make sure
+
* ``~/venv/bin/python setup.py install``
This will compile and install CFFI in this virtualenv, using the
From pypy.commits at gmail.com Sat May 7 06:03:28 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 03:03:28 -0700 (PDT)
Subject: [pypy-commit] cffi default: updates
Message-ID: <572dbd70.0c1b1c0a.e67cd.4e3e@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2690:aaad62d58492
Date: 2016-05-07 12:04 +0200
http://bitbucket.org/cffi/cffi/changeset/aaad62d58492/
Log: updates
diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst
--- a/doc/source/cdef.rst
+++ b/doc/source/cdef.rst
@@ -598,7 +598,8 @@
All of the ANSI C *declarations* should be supported in ``cdef()``,
and some of C99. (This excludes any ``#include`` or ``#ifdef``.)
-Known missing features that are GCC or MSVC extensions:
+Known missing features that are either in C99, or are GCC or MSVC
+extensions:
* Any ``__attribute__`` or ``#pragma pack(n)``
@@ -613,9 +614,6 @@
foo_wrapper(struct my_complex c) { foo(c.real + c.imag*1j); }``, and
call ``foo_wrapper`` rather than ``foo`` directly.
-* Function pointers with non-default calling conventions (e.g. on
- Windows, "stdcall").
-
Note that declarations like ``int field[];`` in
structures are interpreted as variable-length structures. Declarations
like ``int field[...];`` on the other hand are arrays whose length is
From pypy.commits at gmail.com Sat May 7 08:15:10 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 05:15:10 -0700 (PDT)
Subject: [pypy-commit] cffi default: Issue #255: `bool(ffi.cast("primitive",
x))` is now True or False
Message-ID: <572ddc4e.8a37c20a.4d8f2.ffff9797@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2691:e7ca388b0197
Date: 2016-05-07 14:15 +0200
http://bitbucket.org/cffi/cffi/changeset/e7ca388b0197/
Log: Issue #255: `bool(ffi.cast("primitive", x))` is now True or False
depending on whether the value is zero or not. It used to always be
True for any value.
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c
--- a/c/_cffi_backend.c
+++ b/c/_cffi_backend.c
@@ -1858,6 +1858,18 @@
static int cdata_nonzero(CDataObject *cd)
{
+ if (cd->c_type->ct_flags & CT_PRIMITIVE_ANY) {
+ if (cd->c_type->ct_flags & (CT_PRIMITIVE_SIGNED |
+ CT_PRIMITIVE_UNSIGNED |
+ CT_PRIMITIVE_CHAR))
+ return read_raw_unsigned_data(cd->c_data, cd->c_type->ct_size) != 0;
+
+ if (cd->c_type->ct_flags & CT_PRIMITIVE_FLOAT) {
+ if (cd->c_type->ct_flags & CT_IS_LONGDOUBLE)
+ return read_raw_longdouble_data(cd->c_data) != 0.0;
+ return read_raw_float_data(cd->c_data, cd->c_type->ct_size) != 0.0;
+ }
+ }
return cd->c_data != NULL;
}
diff --git a/c/test_c.py b/c/test_c.py
--- a/c/test_c.py
+++ b/c/test_c.py
@@ -152,7 +152,10 @@
INF = 1E200 * 1E200
for name in ["float", "double"]:
p = new_primitive_type(name)
- assert bool(cast(p, 0))
+ assert bool(cast(p, 0)) is False # since 1.7
+ assert bool(cast(p, -0.0)) is False # since 1.7
+ assert bool(cast(p, 1e-42)) is True
+ assert bool(cast(p, -1e-42)) is True
assert bool(cast(p, INF))
assert bool(cast(p, -INF))
assert int(cast(p, -150)) == -150
@@ -213,7 +216,8 @@
def test_character_type():
p = new_primitive_type("char")
- assert bool(cast(p, '\x00'))
+ assert bool(cast(p, 'A')) is True
+ assert bool(cast(p, '\x00')) is False # since 1.7
assert cast(p, '\x00') != cast(p, -17*256)
assert int(cast(p, 'A')) == 65
assert long(cast(p, 'A')) == 65
@@ -2569,7 +2573,8 @@
BBoolP = new_pointer_type(BBool)
assert int(cast(BBool, False)) == 0
assert int(cast(BBool, True)) == 1
- assert bool(cast(BBool, False)) is True # warning!
+ assert bool(cast(BBool, False)) is False # since 1.7
+ assert bool(cast(BBool, True)) is True
assert int(cast(BBool, 3)) == 1
assert int(cast(BBool, long(3))) == 1
assert int(cast(BBool, long(10)**4000)) == 1
From pypy.commits at gmail.com Sat May 7 08:29:04 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 05:29:04 -0700 (PDT)
Subject: [pypy-commit] cffi default: Document e7ca388b0197
Message-ID: <572ddf90.43ecc20a.eb509.ffff8fa0@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2692:4aca604a897f
Date: 2016-05-07 14:19 +0200
http://bitbucket.org/cffi/cffi/changeset/4aca604a897f/
Log: Document e7ca388b0197
diff --git a/doc/source/ref.rst b/doc/source/ref.rst
--- a/doc/source/ref.rst
+++ b/doc/source/ref.rst
@@ -508,24 +508,24 @@
+---------------+------------------------+------------------+----------------+
| C type | writing into | reading from |other operations|
+===============+========================+==================+================+
-| integers | an integer or anything | a Python int or | int() |
-| and enums | on which int() works | long, depending | |
+| integers | an integer or anything | a Python int or | int(), bool() |
+| and enums | on which int() works | long, depending | `(******)` |
| `(*****)` | (but not a float!). | on the type | |
| | Must be within range. | | |
+---------------+------------------------+------------------+----------------+
-| ``char`` | a string of length 1 | a string of | int() |
+| ``char`` | a string of length 1 | a string of | int(), bool() |
| | or another | length 1 | |
+---------------+------------------------+------------------+----------------+
| ``wchar_t`` | a unicode of length 1 | a unicode of | |
-| | (or maybe 2 if | length 1 | int() |
+| | (or maybe 2 if | length 1 | int(), bool() |
| | surrogates) or | (or maybe 2 if | |
| | another | surrogates) | |
+---------------+------------------------+------------------+----------------+
-| ``float``, | a float or anything on | a Python float | float(), int() |
-| ``double`` | which float() works | | |
+| ``float``, | a float or anything on | a Python float | float(), int(),|
+| ``double`` | which float() works | | bool() |
+---------------+------------------------+------------------+----------------+
-|``long double``| another with | a , to | float(), int() |
-| | a ``long double``, or | avoid loosing | |
+|``long double``| another with | a , to | float(), int(),|
+| | a ``long double``, or | avoid loosing | bool() |
| | anything on which | precision `(***)`| |
| | float() works | | |
+---------------+------------------------+------------------+----------------+
@@ -635,3 +635,8 @@
compare their value symbolically, use code like ``if x.field ==
lib.FOO``. If you really want to get their value as a string, use
``ffi.string(ffi.cast("the_enum_type", x.field))``.
+
+`(******)` bool() on a primitive cdata:
+
+ *New in version 1.7.* In previous versions, it only worked on
+ pointers; for primitives it always returned True.
From pypy.commits at gmail.com Sat May 7 08:29:05 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 05:29:05 -0700 (PDT)
Subject: [pypy-commit] cffi default: an extra test
Message-ID: <572ddf91.4ca51c0a.f2226.ffff8573@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2693:4e890638f9d1
Date: 2016-05-07 14:29 +0200
http://bitbucket.org/cffi/cffi/changeset/4e890638f9d1/
Log: an extra test
diff --git a/c/test_c.py b/c/test_c.py
--- a/c/test_c.py
+++ b/c/test_c.py
@@ -158,6 +158,7 @@
assert bool(cast(p, -1e-42)) is True
assert bool(cast(p, INF))
assert bool(cast(p, -INF))
+ assert bool(cast(p, float("nan")))
assert int(cast(p, -150)) == -150
assert int(cast(p, 61.91)) == 61
assert long(cast(p, 61.91)) == 61
From pypy.commits at gmail.com Sat May 7 08:48:18 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 05:48:18 -0700 (PDT)
Subject: [pypy-commit] pypy default: update to cffi/4d19ce180883
Message-ID: <572de412.83e21c0a.2f8b6.19bf@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84274:e46b5cbc8d94
Date: 2016-05-07 14:48 +0200
http://bitbucket.org/pypy/pypy/changeset/e46b5cbc8d94/
Log: update to cffi/4d19ce180883
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -365,8 +365,16 @@
return self.ctype.size
def with_gc(self, w_destructor):
+ space = self.space
+ if space.is_none(w_destructor):
+ if isinstance(self, W_CDataGCP):
+ self.w_destructor = None
+ return space.w_None
+ raise oefmt(space.w_TypeError,
+ "Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
with self as ptr:
- return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor)
+ return W_CDataGCP(space, ptr, self.ctype, self, w_destructor)
def unpack(self, length):
from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray
@@ -538,7 +546,7 @@
class W_CDataGCP(W_CData):
"""For ffi.gc()."""
_attrs_ = ['w_original_cdata', 'w_destructor']
- _immutable_fields_ = ['w_original_cdata', 'w_destructor']
+ _immutable_fields_ = ['w_original_cdata']
def __init__(self, space, cdata, ctype, w_original_cdata, w_destructor):
W_CData.__init__(self, space, cdata, ctype)
@@ -552,7 +560,10 @@
def call_destructor(self):
assert isinstance(self, W_CDataGCP)
- self.space.call_function(self.w_destructor, self.w_original_cdata)
+ w_destructor = self.w_destructor
+ if w_destructor is not None:
+ self.w_destructor = None
+ self.space.call_function(w_destructor, self.w_original_cdata)
W_CData.typedef = TypeDef(
diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py
--- a/pypy/module/_cffi_backend/test/test_ffi_obj.py
+++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py
@@ -331,6 +331,25 @@
gc.collect()
assert seen == [1]
+ def test_ffi_gc_disable(self):
+ import _cffi_backend as _cffi1_backend
+ ffi = _cffi1_backend.FFI()
+ p = ffi.new("int *", 123)
+ raises(TypeError, ffi.gc, p, None)
+ seen = []
+ q1 = ffi.gc(p, lambda p: seen.append(1))
+ q2 = ffi.gc(q1, lambda p: seen.append(2))
+ import gc; gc.collect()
+ assert seen == []
+ assert ffi.gc(q1, None) is None
+ del q1, q2
+ for i in range(5):
+ if seen:
+ break
+ import gc
+ gc.collect()
+ assert seen == [2]
+
def test_ffi_new_allocator_1(self):
import _cffi_backend as _cffi1_backend
ffi = _cffi1_backend.FFI()
From pypy.commits at gmail.com Sat May 7 08:48:20 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 05:48:20 -0700 (PDT)
Subject: [pypy-commit] pypy default: update to cffi/e7ca388b0197
Message-ID: <572de414.6322c20a.3786f.ffffac9a@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84275:6cad0aa044f7
Date: 2016-05-07 14:48 +0200
http://bitbucket.org/pypy/pypy/changeset/6cad0aa044f7/
Log: update to cffi/e7ca388b0197
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -71,7 +71,7 @@
def nonzero(self):
with self as ptr:
- nonzero = bool(ptr)
+ nonzero = self.ctype.nonzero(ptr)
return self.space.wrap(nonzero)
def int(self, space):
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -147,6 +147,9 @@
raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number",
self.name)
+ def nonzero(self, cdata):
+ return bool(cdata)
+
def insert_name(self, extra, extra_position):
name = '%s%s%s' % (self.name[:self.name_position],
extra,
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -93,6 +93,18 @@
return self.space.newlist_int(result)
return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length)
+ def nonzero(self, cdata):
+ if self.size <= rffi.sizeof(lltype.Signed):
+ value = misc.read_raw_long_data(cdata, self.size)
+ return value != 0
+ else:
+ return self._nonzero_longlong(cdata)
+
+ def _nonzero_longlong(self, cdata):
+ # in its own function: LONGLONG may make the whole function jit-opaque
+ value = misc.read_raw_signed_data(cdata, self.size)
+ return bool(value)
+
class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive):
_attrs_ = []
@@ -435,6 +447,9 @@
return self.space.newlist_float(result)
return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length)
+ def nonzero(self, cdata):
+ return misc.is_nonnull_float(cdata, self.size)
+
class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat):
_attrs_ = []
@@ -501,3 +516,7 @@
rffi.LONGDOUBLE, rffi.LONGDOUBLEP)
return True
return W_CTypePrimitive.pack_list_of_items(self, cdata, w_ob)
+
+ @jit.dont_look_inside
+ def nonzero(self, cdata):
+ return misc.is_nonnull_longdouble(cdata, self.size)
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -256,7 +256,7 @@
def is_nonnull_longdouble(cdata):
return _is_nonnull_longdouble(read_raw_longdouble_data(cdata))
def is_nonnull_float(cdata, size):
- return read_raw_float_data(cdata, size) != 0.0
+ return read_raw_float_data(cdata, size) != 0.0 # note: True if a NaN
def object_as_bool(space, w_ob):
# convert and cast a Python object to a boolean. Accept an integer
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -141,9 +141,13 @@
INF = 1E200 * 1E200
for name in ["float", "double"]:
p = new_primitive_type(name)
- assert bool(cast(p, 0))
+ assert bool(cast(p, 0)) is False # since 1.7
+ assert bool(cast(p, -0.0)) is False # since 1.7
+ assert bool(cast(p, 1e-42)) is True
+ assert bool(cast(p, -1e-42)) is True
assert bool(cast(p, INF))
assert bool(cast(p, -INF))
+ assert bool(cast(p, float("nan")))
assert int(cast(p, -150)) == -150
assert int(cast(p, 61.91)) == 61
assert long(cast(p, 61.91)) == 61
@@ -202,7 +206,8 @@
def test_character_type():
p = new_primitive_type("char")
- assert bool(cast(p, '\x00'))
+ assert bool(cast(p, 'A')) is True
+ assert bool(cast(p, '\x00')) is False # since 1.7
assert cast(p, '\x00') != cast(p, -17*256)
assert int(cast(p, 'A')) == 65
assert long(cast(p, 'A')) == 65
@@ -2558,7 +2563,8 @@
BBoolP = new_pointer_type(BBool)
assert int(cast(BBool, False)) == 0
assert int(cast(BBool, True)) == 1
- assert bool(cast(BBool, False)) is True # warning!
+ assert bool(cast(BBool, False)) is False # since 1.7
+ assert bool(cast(BBool, True)) is True
assert int(cast(BBool, 3)) == 1
assert int(cast(BBool, long(3))) == 1
assert int(cast(BBool, long(10)**4000)) == 1
From pypy.commits at gmail.com Sat May 7 09:01:26 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 06:01:26 -0700 (PDT)
Subject: [pypy-commit] pypy default: oops
Message-ID: <572de726.4106c20a.e4ae0.ffffe347@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84276:a6542e0224bf
Date: 2016-05-07 14:51 +0200
http://bitbucket.org/pypy/pypy/changeset/a6542e0224bf/
Log: oops
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -519,4 +519,4 @@
@jit.dont_look_inside
def nonzero(self, cdata):
- return misc.is_nonnull_longdouble(cdata, self.size)
+ return misc.is_nonnull_longdouble(cdata)
From pypy.commits at gmail.com Sat May 7 09:24:40 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 06:24:40 -0700 (PDT)
Subject: [pypy-commit] cffi default: document e7ca388b0197 in whatsnew
Message-ID: <572dec98.a82cc20a.62e83.ffffacf2@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2694:ffe3a7c191a6
Date: 2016-05-07 15:25 +0200
http://bitbucket.org/cffi/cffi/changeset/ffe3a7c191a6/
Log: document e7ca388b0197 in whatsnew
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst
--- a/doc/source/whatsnew.rst
+++ b/doc/source/whatsnew.rst
@@ -11,6 +11,11 @@
* ``ffi.gc(p, None)`` removes the destructor on an object previously
created by another call to ``ffi.gc()``
+* ``bool(ffi.cast("primitive type", x))`` now returns False if the
+ value is zero (including ``-0.0``), and True otherwise. Previously
+ this would only return False for cdata objects of a pointer type when
+ the pointer is NULL.
+
v1.6
====
From pypy.commits at gmail.com Sat May 7 09:45:34 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 06:45:34 -0700 (PDT)
Subject: [pypy-commit] pypy default: epoll.register() takes a second
argument that should default to
Message-ID: <572df17e.89141c0a.eea66.ffffa0fe@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84277:2a893a527c5e
Date: 2016-05-07 15:45 +0200
http://bitbucket.org/pypy/pypy/changeset/2a893a527c5e/
Log: epoll.register() takes a second argument that should default to a
specific value, not "-1".
epoll.modify() should not have the second argument optional at all.
diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py
--- a/pypy/module/select/interp_epoll.py
+++ b/pypy/module/select/interp_epoll.py
@@ -53,6 +53,10 @@
EPOLL_CTL_MOD = cconfig["EPOLL_CTL_MOD"]
EPOLL_CTL_DEL = cconfig["EPOLL_CTL_DEL"]
+DEF_REGISTER_EVENTMASK = (public_symbols["EPOLLIN"] |
+ public_symbols["EPOLLOUT"] |
+ public_symbols["EPOLLPRI"])
+
epoll_create = rffi.llexternal(
"epoll_create", [rffi.INT], rffi.INT, compilation_info=eci,
save_err=rffi.RFFI_SAVE_ERRNO
@@ -132,7 +136,7 @@
self.close()
@unwrap_spec(eventmask=int)
- def descr_register(self, space, w_fd, eventmask=-1):
+ def descr_register(self, space, w_fd, eventmask=DEF_REGISTER_EVENTMASK):
self.check_closed(space)
self.epoll_ctl(space, EPOLL_CTL_ADD, w_fd, eventmask)
@@ -141,7 +145,7 @@
self.epoll_ctl(space, EPOLL_CTL_DEL, w_fd, 0, ignore_ebadf=True)
@unwrap_spec(eventmask=int)
- def descr_modify(self, space, w_fd, eventmask=-1):
+ def descr_modify(self, space, w_fd, eventmask):
self.check_closed(space)
self.epoll_ctl(space, EPOLL_CTL_MOD, w_fd, eventmask)
From pypy.commits at gmail.com Sat May 7 14:53:22 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sat, 07 May 2016 11:53:22 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: fix
Message-ID: <572e39a2.49961c0a.938e1.0137@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84278:0e4c7a2b1e39
Date: 2016-05-07 11:49 -0700
http://bitbucket.org/pypy/pypy/changeset/0e4c7a2b1e39/
Log: fix
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -850,8 +850,7 @@
assert getattr(T, '\u03bc') == 2
assert getattr(T, '\u87d2') == 3
#assert getattr(T, 'x\U000E0100') == 4
- expected = ("['__dict__', '__doc__', '__module__', "
- "'__qualname__', '__weakref__', "
+ expected = ("['__dict__', '__doc__', '__module__', '__weakref__', "
# "x󠄀", "'ä', 'μ', '蟒']")
"'ä', 'μ', '蟒']")
assert expected in str(sorted(T.__dict__.keys()))
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -763,7 +763,7 @@
class C(metaclass=T):
pass
assert d
- assert sorted(d[0].keys()) == ['__dict__', '__doc__', '__module__', '__qualname__', '__weakref__']
+ assert sorted(d[0].keys()) == ['__dict__', '__doc__', '__module__', '__weakref__']
d = []
class T(type):
def mro(cls):
From pypy.commits at gmail.com Sat May 7 14:53:24 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sat, 07 May 2016 11:53:24 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: allow setting class docstrings
Message-ID: <572e39a4.de361c0a.db8e8.0098@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84279:605722e607a3
Date: 2016-05-07 11:49 -0700
http://bitbucket.org/pypy/pypy/changeset/605722e607a3/
Log: allow setting class docstrings
diff --git a/lib-python/3/test/test_descr.py b/lib-python/3/test/test_descr.py
--- a/lib-python/3/test/test_descr.py
+++ b/lib-python/3/test/test_descr.py
@@ -4515,9 +4515,9 @@
with self.assertRaises(TypeError) as cm:
type(list).__dict__["__doc__"].__set__(list, "blah")
self.assertIn("can't set list.__doc__", str(cm.exception))
- with self.assertRaises(TypeError) as cm:
+ with self.assertRaises((AttributeError, TypeError)) as cm:
type(X).__dict__["__doc__"].__delete__(X)
- self.assertIn("can't delete X.__doc__", str(cm.exception))
+ self.assertIn("delete", str(cm.exception))
self.assertEqual(X.__doc__, "banana")
def test_qualname(self):
diff --git a/pypy/interpreter/test/test_class.py b/pypy/interpreter/test/test_class.py
--- a/pypy/interpreter/test/test_class.py
+++ b/pypy/interpreter/test/test_class.py
@@ -123,3 +123,14 @@
assert C.__qualname__ == 'test_qualname..C'
assert C.D.__qualname__ == 'test_qualname..C.D'
assert not hasattr(C(), '__qualname__')
+
+ def test_set_doc(self):
+ class X:
+ "elephant"
+ X.__doc__ = "banana"
+ assert X.__doc__ == "banana"
+ raises(TypeError, lambda:
+ type(list).__dict__["__doc__"].__set__(list, "blah"))
+ raises((AttributeError, TypeError), lambda:
+ type(X).__dict__["__doc__"].__delete__(X))
+ assert X.__doc__ == "banana"
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -853,6 +853,12 @@
else:
return space.get(w_result, space.w_None, w_type)
+def descr_set__doc(space, w_type, w_value):
+ w_type = _check(space, w_type)
+ if not w_type.is_heaptype():
+ raise oefmt(space.w_TypeError, "can't set %N.__doc__", w_type)
+ w_type.setdictvalue(space, '__doc__', w_value)
+
def descr__dir(space, w_type):
from pypy.objspace.std.util import _classdir
return space.call_function(space.w_list, _classdir(space, w_type))
@@ -928,7 +934,7 @@
__base__ = GetSetProperty(descr__base),
__mro__ = GetSetProperty(descr_get__mro__),
__dict__ = GetSetProperty(descr_get_dict),
- __doc__ = GetSetProperty(descr__doc),
+ __doc__ = GetSetProperty(descr__doc, descr_set__doc),
__dir__ = gateway.interp2app(descr__dir),
mro = gateway.interp2app(descr_mro),
__flags__ = GetSetProperty(descr__flags),
From pypy.commits at gmail.com Sat May 7 17:56:17 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sat, 07 May 2016 14:56:17 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Close branch py3k
Message-ID: <572e6481.875a1c0a.49d7a.3bcb@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84280:15f134f91f71
Date: 2016-05-07 14:55 -0700
http://bitbucket.org/pypy/pypy/changeset/15f134f91f71/
Log: Close branch py3k
From pypy.commits at gmail.com Sat May 7 18:02:48 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sat, 07 May 2016 15:02:48 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: merge
Message-ID: <572e6608.697ac20a.8c526.5d5b@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84282:77404fa13979
Date: 2016-05-07 15:01 -0700
http://bitbucket.org/pypy/pypy/changeset/77404fa13979/
Log: merge
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -802,8 +802,6 @@
@unwrap_spec(errors='str_or_None')
def unicode_internal_decode(space, w_string, errors="strict"):
- space.warn(space.wrap("unicode_internal codec has been deprecated"),
- space.w_DeprecationWarning)
if errors is None:
errors = 'strict'
# special case for this codec: unicodes are returned as is
@@ -811,6 +809,8 @@
return space.newtuple([w_string, space.len(w_string)])
string = space.readbuf_w(w_string).as_str()
+ space.warn(space.wrap("unicode_internal codec has been deprecated"),
+ space.w_DeprecationWarning)
if len(string) == 0:
return space.newtuple([space.wrap(u''), space.wrap(0)])
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
--- a/pypy/module/_codecs/test/test_codecs.py
+++ b/pypy/module/_codecs/test/test_codecs.py
@@ -805,3 +805,38 @@
assert _codecs.unicode_escape_decode(b) == (u'', 0)
assert _codecs.raw_unicode_escape_decode(b) == (u'', 0)
assert _codecs.unicode_internal_decode(b) == (u'', 0)
+
+ def test_unicode_internal_warnings(self):
+ import codecs, warnings
+ warnings.simplefilter("always")
+ encoder = codecs.getencoder("unicode_internal")
+ decoder = codecs.getdecoder("unicode_internal")
+ warning_msg = "unicode_internal codec has been deprecated"
+ with warnings.catch_warnings(record=True) as w:
+ try:
+ encoder(42)
+ except TypeError:
+ pass
+ assert len(w) == 1
+ assert str(w[0].message) == warning_msg
+ assert w[0].category == DeprecationWarning
+
+ with warnings.catch_warnings(record=True) as w:
+ try:
+ decoder(42)
+ except TypeError:
+ pass
+ assert len(w) == 0
+
+ with warnings.catch_warnings(record=True) as w:
+ encoded_abc = encoder("abc")[0]
+ assert len(w) == 1
+ assert str(w[0].message)== warning_msg
+ assert w[0].category == DeprecationWarning
+
+ with warnings.catch_warnings(record=True) as w:
+ print(type(encoded_abc))
+ decoder(encoded_abc)
+ assert len(w) == 1
+ assert str(w[0].message) == warning_msg
+ assert w[0].category == DeprecationWarning
From pypy.commits at gmail.com Sat May 7 18:02:47 2016
From: pypy.commits at gmail.com (marky1991)
Date: Sat, 07 May 2016 15:02:47 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Match cpython's inconsistent warning about
the now-deprecated unicode_internal codec.
Message-ID: <572e6607.83e21c0a.2f8b6.ffffcd2c@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r84281:3c339639fd2f
Date: 2016-05-07 16:29 -0400
http://bitbucket.org/pypy/pypy/changeset/3c339639fd2f/
Log: Match cpython's inconsistent warning about the now-deprecated
unicode_internal codec.
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -802,8 +802,6 @@
@unwrap_spec(errors='str_or_None')
def unicode_internal_decode(space, w_string, errors="strict"):
- space.warn(space.wrap("unicode_internal codec has been deprecated"),
- space.w_DeprecationWarning)
if errors is None:
errors = 'strict'
# special case for this codec: unicodes are returned as is
@@ -811,6 +809,8 @@
return space.newtuple([w_string, space.len(w_string)])
string = space.readbuf_w(w_string).as_str()
+ space.warn(space.wrap("unicode_internal codec has been deprecated"),
+ space.w_DeprecationWarning)
if len(string) == 0:
return space.newtuple([space.wrap(u''), space.wrap(0)])
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
--- a/pypy/module/_codecs/test/test_codecs.py
+++ b/pypy/module/_codecs/test/test_codecs.py
@@ -805,3 +805,38 @@
assert _codecs.unicode_escape_decode(b) == (u'', 0)
assert _codecs.raw_unicode_escape_decode(b) == (u'', 0)
assert _codecs.unicode_internal_decode(b) == (u'', 0)
+
+ def test_unicode_internal_warnings(self):
+ import codecs, warnings
+ warnings.simplefilter("always")
+ encoder = codecs.getencoder("unicode_internal")
+ decoder = codecs.getdecoder("unicode_internal")
+ warning_msg = "unicode_internal codec has been deprecated"
+ with warnings.catch_warnings(record=True) as w:
+ try:
+ encoder(42)
+ except TypeError:
+ pass
+ assert len(w) == 1
+ assert str(w[0].message) == warning_msg
+ assert w[0].category == DeprecationWarning
+
+ with warnings.catch_warnings(record=True) as w:
+ try:
+ decoder(42)
+ except TypeError:
+ pass
+ assert len(w) == 0
+
+ with warnings.catch_warnings(record=True) as w:
+ encoded_abc = encoder("abc")[0]
+ assert len(w) == 1
+ assert str(w[0].message)== warning_msg
+ assert w[0].category == DeprecationWarning
+
+ with warnings.catch_warnings(record=True) as w:
+ print(type(encoded_abc))
+ decoder(encoded_abc)
+ assert len(w) == 1
+ assert str(w[0].message) == warning_msg
+ assert w[0].category == DeprecationWarning
From pypy.commits at gmail.com Sat May 7 20:06:00 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sat, 07 May 2016 17:06:00 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: o set __main__ loader to SourceFileLoader
like cpython
Message-ID: <572e82e8.4374c20a.6edf0.7650@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84283:6c12658dce83
Date: 2016-05-07 17:04 -0700
http://bitbucket.org/pypy/pypy/changeset/6c12658dce83/
Log: o set __main__ loader to SourceFileLoader like cpython o workaround
subtle test_cmd_line_script impl details
diff --git a/lib-python/3/test/test_cmd_line_script.py b/lib-python/3/test/test_cmd_line_script.py
--- a/lib-python/3/test/test_cmd_line_script.py
+++ b/lib-python/3/test/test_cmd_line_script.py
@@ -41,7 +41,11 @@
_loader = __loader__ if __loader__ is BuiltinImporter else type(__loader__)
print('__loader__==%a' % _loader)
print('__file__==%a' % __file__)
-assertEqual(__cached__, None)
+if __cached__ is not None:
+ # XXX: test_script_compiled on PyPy
+ assertEqual(__file__, __cached__)
+ if not __cached__.endswith(('pyc', 'pyo')):
+ raise AssertionError('has __cached__ but not compiled')
print('__package__==%r' % __package__)
# Check the sys module
import sys
@@ -159,8 +163,9 @@
def test_basic_script(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script')
+ package = '' if support.check_impl_detail(pypy=True) else None
self._check_script(script_name, script_name, script_name,
- script_dir, None,
+ script_dir, package,
importlib.machinery.SourceFileLoader)
def test_script_compiled(self):
@@ -169,8 +174,9 @@
py_compile.compile(script_name, doraise=True)
os.remove(script_name)
pyc_file = support.make_legacy_pyc(script_name)
+ package = '' if support.check_impl_detail(pypy=True) else None
self._check_script(pyc_file, pyc_file,
- pyc_file, script_dir, None,
+ pyc_file, script_dir, package,
importlib.machinery.SourcelessFileLoader)
def test_directory(self):
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -677,9 +677,11 @@
# CPython goes to great lengths to detect other cases
# of pyc file format, but I think it's ok not to care.
try:
- from _frozen_importlib import SourcelessFileLoader
+ from _frozen_importlib import (
+ SourceFileLoader, SourcelessFileLoader)
except ImportError:
- from _frozen_importlib_external import SourcelessFileLoader
+ from _frozen_importlib_external import (
+ SourceFileLoader, SourcelessFileLoader)
if IS_WINDOWS:
filename = filename.lower()
if filename.endswith('.pyc') or filename.endswith('.pyo'):
@@ -701,6 +703,10 @@
break
else:
# That's the normal path, "pypy stuff.py".
+ # We don't actually load via SourceFileLoader
+ # because we require PyCF_ACCEPT_NULL_BYTES
+ loader = SourceFileLoader('__main__', filename)
+ mainmodule.__loader__ = loader
@hidden_applevel
def execfile(filename, namespace):
with open(filename, 'rb') as f:
From pypy.commits at gmail.com Sun May 8 02:57:58 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 07 May 2016 23:57:58 -0700 (PDT)
Subject: [pypy-commit] pypy default: Issue #2293: codecs.py will sometimes
issue a reset() on a StreamWriter
Message-ID: <572ee376.8344c20a.2d101.ffffd1c8@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84284:591a29bc54fc
Date: 2016-05-08 08:58 +0200
http://bitbucket.org/pypy/pypy/changeset/591a29bc54fc/
Log: Issue #2293: codecs.py will sometimes issue a reset() on a
StreamWriter attached to a file that is not opened for writing at
all. We must not emit a "write('')"!
diff --git a/pypy/module/_multibytecodec/app_multibytecodec.py b/pypy/module/_multibytecodec/app_multibytecodec.py
--- a/pypy/module/_multibytecodec/app_multibytecodec.py
+++ b/pypy/module/_multibytecodec/app_multibytecodec.py
@@ -44,8 +44,10 @@
self, data))
def reset(self):
- self.stream.write(MultibyteIncrementalEncoder.encode(
- self, '', final=True))
+ data = MultibyteIncrementalEncoder.encode(
+ self, '', final=True)
+ if len(data) > 0:
+ self.stream.write(data)
MultibyteIncrementalEncoder.reset(self)
def writelines(self, lines):
diff --git a/pypy/module/_multibytecodec/test/test_app_stream.py b/pypy/module/_multibytecodec/test/test_app_stream.py
--- a/pypy/module/_multibytecodec/test/test_app_stream.py
+++ b/pypy/module/_multibytecodec/test/test_app_stream.py
@@ -90,3 +90,15 @@
w.write(u'\u304b')
w.write(u'\u309a')
assert w.stream.output == ['\x83m', '', '\x82\xf5']
+
+ def test_writer_seek_no_empty_write(self):
+ # issue #2293: codecs.py will sometimes issue a reset()
+ # on a StreamWriter attached to a file that is not opened
+ # for writing at all. We must not emit a "write('')"!
+ class FakeFile:
+ def write(self, data):
+ raise IOError("can't write!")
+ #
+ w = self.ShiftJisx0213StreamWriter(FakeFile())
+ w.reset()
+ # assert did not crash
From pypy.commits at gmail.com Sun May 8 09:40:54 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 08 May 2016 06:40:54 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: Fix test
Message-ID: <572f41e6.4ea81c0a.2c7ec.47a7@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84285:dd0d964c6dec
Date: 2016-05-08 15:41 +0200
http://bitbucket.org/pypy/pypy/changeset/dd0d964c6dec/
Log: Fix test
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -272,8 +272,9 @@
v3 = varoftype(lltype.Signed)
for v1 in [varoftype(lltype.Signed), const(42)]:
for v2 in [varoftype(lltype.Signed), const(43)]:
- op = SpaceOperation('int_add_nonneg_ovf', [v1, v2], v3)
- oplist = Transformer(FakeCPU()).rewrite_operation(op)
+ op = SpaceOperation('foobar', [v1, v2], v3)
+ oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.add_ovf',
+ [v1, v2])
op1, op0 = oplist
assert op0.opname == 'int_add_ovf'
if isinstance(v1, Constant) and isinstance(v2, Variable):
From pypy.commits at gmail.com Sun May 8 11:33:48 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 08 May 2016 08:33:48 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: Add an oopspec to turn
divisions into "int_py_div" in the JIT frontend.
Message-ID: <572f5c5c.a60ac20a.b69fb.7f63@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84287:ac5d871e304d
Date: 2016-05-08 17:33 +0200
http://bitbucket.org/pypy/pypy/changeset/ac5d871e304d/
Log: Add an oopspec to turn divisions into "int_py_div" in the JIT
frontend. The plan is to keep them as "int_py_div", and rewrite them
in the end to "int_c_div".
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -1444,7 +1444,7 @@
self.mov(imm0, resloc)
self.mc.CMOVNS(resloc, arglocs[0])
- def genop_int_mod(self, op, arglocs, resloc):
+ def genop_int_c_mod(self, op, arglocs, resloc):
if IS_X86_32:
self.mc.CDQ()
elif IS_X86_64:
@@ -1452,7 +1452,7 @@
self.mc.IDIV_r(ecx.value)
- genop_int_floordiv = genop_int_mod
+ genop_int_c_div = genop_int_c_mod
def genop_uint_floordiv(self, op, arglocs, resloc):
self.mc.XOR_rr(edx.value, edx.value)
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -598,15 +598,15 @@
assert l2 is resultreg
self.rm.possibly_free_var(tmpvar)
- def consider_int_mod(self, op):
+ def consider_int_c_mod(self, op):
self._consider_int_div_or_mod(op, edx, eax)
self.perform(op, [eax, ecx], edx)
- def consider_int_floordiv(self, op):
+ def consider_int_c_div(self, op):
self._consider_int_div_or_mod(op, eax, edx)
self.perform(op, [eax, ecx], eax)
- consider_uint_floordiv = consider_int_floordiv
+ consider_uint_floordiv = consider_int_c_div
def _consider_compop(self, op):
vx = op.getarg(0)
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -1903,12 +1903,15 @@
self.callcontrol.callinfocollection.add(oopspecindex, calldescr, func)
def _handle_int_ovf(self, op, oopspec_name, args):
- assert oopspec_name in ('int.add_ovf', 'int.sub_ovf', 'int.mul_ovf')
+ assert oopspec_name in ('int.add_ovf', 'int.sub_ovf', 'int.mul_ovf',
+ 'int.py_div', 'int.py_mod')
op0 = SpaceOperation(oopspec_name.replace('.', '_'), args, op.result)
- if oopspec_name != 'int.sub_ovf':
+ if oopspec_name in ('int.add_ovf', 'int.mul_ovf'):
op0 = self._rewrite_symmetric(op0)
- oplive = SpaceOperation('-live-', [], None)
- return [oplive, op0]
+ oplist = [op0]
+ if oopspec_name.endswith('_ovf'):
+ oplist.insert(0, SpaceOperation('-live-', [], None))
+ return oplist
def _handle_stroruni_call(self, op, oopspec_name, args):
SoU = args[0].concretetype # Ptr(STR) or Ptr(UNICODE)
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -268,15 +268,16 @@
assert op1.result == v3
assert op1.opname == name2[0]
-def test_symmetric_int_add_ovf():
+ at py.test.mark.parametrize('opname', ['add_ovf', 'mul_ovf'])
+def test_symmetric_op_ovf(opname):
v3 = varoftype(lltype.Signed)
for v1 in [varoftype(lltype.Signed), const(42)]:
for v2 in [varoftype(lltype.Signed), const(43)]:
op = SpaceOperation('foobar', [v1, v2], v3)
- oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.add_ovf',
+ oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.'+opname,
[v1, v2])
op1, op0 = oplist
- assert op0.opname == 'int_add_ovf'
+ assert op0.opname == 'int_'+opname
if isinstance(v1, Constant) and isinstance(v2, Variable):
assert op0.args == [v2, v1]
assert op0.result == v3
@@ -287,6 +288,35 @@
assert op1.args == []
assert op1.result is None
+ at py.test.mark.parametrize('opname', ['sub_ovf'])
+def test_asymmetric_op_ovf(opname):
+ v3 = varoftype(lltype.Signed)
+ for v1 in [varoftype(lltype.Signed), const(42)]:
+ for v2 in [varoftype(lltype.Signed), const(43)]:
+ op = SpaceOperation('foobar', [v1, v2], v3)
+ oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.'+opname,
+ [v1, v2])
+ op1, op0 = oplist
+ assert op0.opname == 'int_'+opname
+ assert op0.args == [v1, v2]
+ assert op0.result == v3
+ assert op1.opname == '-live-'
+ assert op1.args == []
+ assert op1.result is None
+
+ at py.test.mark.parametrize('opname', ['py_div', 'py_mod'])
+def test_asymmetric_op_nonovf(opname):
+ v3 = varoftype(lltype.Signed)
+ for v1 in [varoftype(lltype.Signed), const(42)]:
+ for v2 in [varoftype(lltype.Signed), const(43)]:
+ op = SpaceOperation('foobar', [v1, v2], v3)
+ oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.'+opname,
+ [v1, v2])
+ [op0] = oplist
+ assert op0.opname == 'int_'+opname
+ assert op0.args == [v1, v2]
+ assert op0.result == v3
+
def test_calls():
for RESTYPE, with_void, with_i, with_r, with_f in product(
[lltype.Signed, rclass.OBJECTPTR, lltype.Float, lltype.Void],
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -430,8 +430,8 @@
return 0, label
@arguments("i", "i", returns="i")
- def bhimpl_int_floordiv(a, b):
- return llop.int_floordiv(lltype.Signed, a, b)
+ def bhimpl_int_py_div(a, b):
+ return a // b
@arguments("i", "i", returns="i")
def bhimpl_uint_floordiv(a, b):
@@ -439,8 +439,8 @@
return intmask(c)
@arguments("i", "i", returns="i")
- def bhimpl_int_mod(a, b):
- return llop.int_mod(lltype.Signed, a, b)
+ def bhimpl_int_py_mod(a, b):
+ return a % b
@arguments("i", "i", returns="i")
def bhimpl_int_and(a, b):
diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py
--- a/rpython/jit/metainterp/executor.py
+++ b/rpython/jit/metainterp/executor.py
@@ -409,6 +409,8 @@
rop.GC_STORE,
rop.GC_STORE_INDEXED,
rop.LOAD_FROM_GC_TABLE,
+ rop.INT_C_DIV,
+ rop.INT_C_MOD,
): # list of opcodes never executed by pyjitpl
continue
if rop._VEC_PURE_FIRST <= value <= rop._VEC_PURE_LAST:
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -201,7 +201,7 @@
# ------------------------------
- for _opimpl in ['int_add', 'int_sub', 'int_mul', 'int_floordiv', 'int_mod',
+ for _opimpl in ['int_add', 'int_sub', 'int_mul', 'int_py_div', 'int_py_mod',
'int_and', 'int_or', 'int_xor', 'int_signext',
'int_rshift', 'int_lshift', 'uint_rshift',
'uint_lt', 'uint_le', 'uint_gt', 'uint_ge',
diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
--- a/rpython/jit/metainterp/resoperation.py
+++ b/rpython/jit/metainterp/resoperation.py
@@ -955,9 +955,11 @@
'INT_ADD/2/i',
'INT_SUB/2/i',
'INT_MUL/2/i',
- 'INT_FLOORDIV/2/i',
+ 'INT_C_DIV/2/i', # C-style handling of negatives (backend only)
+ 'INT_PY_DIV/2/i', # Python-style handling of negatives (frontend)
'UINT_FLOORDIV/2/i',
- 'INT_MOD/2/i',
+ 'INT_C_MOD/2/i', # C-style handling of negatives (backend only)
+ 'INT_PY_MOD/2/i', # Python-style handling of negatives (frontend)
'INT_AND/2/i',
'INT_OR/2/i',
'INT_XOR/2/i',
diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py
--- a/rpython/jit/metainterp/test/test_ajit.py
+++ b/rpython/jit/metainterp/test/test_ajit.py
@@ -601,7 +601,7 @@
policy = StopAtXPolicy(externfn)
res = self.meta_interp(f, [31], policy=policy)
assert res == 42
- self.check_resops(int_mul=2, int_mod=0)
+ self.check_resops(int_mul=2, int_py_mod=0, int_c_mod=0)
def test_we_are_jitted(self):
myjitdriver = JitDriver(greens = [], reds = ['y'])
diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py
--- a/rpython/jit/metainterp/test/test_dict.py
+++ b/rpython/jit/metainterp/test/test_dict.py
@@ -117,7 +117,7 @@
res1 = f(100)
res2 = self.meta_interp(f, [100], listops=True)
assert res1 == res2
- self.check_resops(int_mod=2) # the hash was traced and eq, but cached
+ self.check_resops(int_py_mod=2) # the hash was traced and eq, but cached
def test_dict_setdefault(self):
myjitdriver = JitDriver(greens = [], reds = ['total', 'dct'])
@@ -156,7 +156,7 @@
assert f(100) == 50
res = self.meta_interp(f, [100], listops=True)
assert res == 50
- self.check_resops(int_mod=2) # key + eq, but cached
+ self.check_resops(int_py_mod=2) # key + eq, but cached
def test_repeated_lookup(self):
if type(self.newdict()) is not dict:
diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py
--- a/rpython/rtyper/rint.py
+++ b/rpython/rtyper/rint.py
@@ -382,6 +382,7 @@
# ---------- floordiv ----------
+ at jit.oopspec("int.py_div(x, y)")
def ll_int_floordiv(x, y):
# Python, and RPython, assume that integer division truncates
# towards -infinity. However, in C, integer division truncates
@@ -447,6 +448,7 @@
# ---------- mod ----------
+ at jit.oopspec("int.py_mod(x, y)")
def ll_int_mod(x, y):
r = llop.int_mod(Signed, x, y) # <= truncates like in C
if y < 0: u = -r
From pypy.commits at gmail.com Sun May 8 11:33:46 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 08 May 2016 08:33:46 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: Fix a few more tests
Message-ID: <572f5c5a.0b1f1c0a.fc792.7462@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84286:ec8b32c158c1
Date: 2016-05-08 16:19 +0200
http://bitbucket.org/pypy/pypy/changeset/ec8b32c158c1/
Log: Fix a few more tests
diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py
--- a/rpython/jit/metainterp/test/test_ajit.py
+++ b/rpython/jit/metainterp/test/test_ajit.py
@@ -1173,7 +1173,6 @@
def test_div_overflow(self):
import sys
- from rpython.rtyper.lltypesystem.lloperation import llop
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
def f(x, y):
res = 0
@@ -1181,15 +1180,13 @@
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
try:
- res += llop.int_floordiv_ovf(lltype.Signed,
- -sys.maxint-1, x)
+ res += ovfcheck((-sys.maxint-1) // x)
x += 5
except OverflowError:
res += 100
y -= 1
return res
- res = self.meta_interp(f, [-41, 16])
- assert res == ((-sys.maxint-1) // (-41) +
+ expected = ((-sys.maxint-1) // (-41) +
(-sys.maxint-1) // (-36) +
(-sys.maxint-1) // (-31) +
(-sys.maxint-1) // (-26) +
@@ -1198,10 +1195,12 @@
(-sys.maxint-1) // (-11) +
(-sys.maxint-1) // (-6) +
100 * 8)
+ assert f(-41, 16) == expected
+ res = self.meta_interp(f, [-41, 16])
+ assert res == expected
def test_overflow_fold_if_divisor_constant(self):
import sys
- from rpython.rtyper.lltypesystem.lloperation import llop
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
def f(x, y):
res = 0
@@ -1209,10 +1208,8 @@
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
try:
- res += llop.int_floordiv_ovf(lltype.Signed,
- x, 2)
- res += llop.int_mod_ovf(lltype.Signed,
- x, 2)
+ res += ovfcheck(x // 2)
+ res += ovfcheck(x % 2)
x += 5
except OverflowError:
res += 100
@@ -1312,7 +1309,6 @@
def test_free_object(self):
import weakref
- from rpython.rtyper.lltypesystem.lloperation import llop
myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
class X(object):
pass
@@ -3824,7 +3820,6 @@
self.check_operations_history(guard_class=0, record_exact_class=1)
def test_give_class_knowledge_to_tracer_explicitly(self):
- from rpython.rtyper.lltypesystem.lloperation import llop
class Base(object):
def f(self):
raise NotImplementedError
diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py
--- a/rpython/jit/metainterp/warmspot.py
+++ b/rpython/jit/metainterp/warmspot.py
@@ -448,7 +448,6 @@
graphs=graphs,
merge_if_blocks=True,
constfold=True,
- raisingop2direct_call=False,
remove_asserts=True,
really_remove_asserts=True)
From pypy.commits at gmail.com Sun May 8 12:12:52 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 08 May 2016 09:12:52 -0700 (PDT)
Subject: [pypy-commit] pypy default: A failing test about division bounds
Message-ID: <572f6584.89cbc20a.44a3.ffff888b@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84288:4b58008df717
Date: 2016-05-08 18:13 +0200
http://bitbucket.org/pypy/pypy/changeset/4b58008df717/
Log: A failing test about division bounds
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -5529,6 +5529,27 @@
"""
self.optimize_loop(ops, expected)
+ def test_division_bound_bug(self):
+ ops = """
+ [i4]
+ i1 = int_ge(i4, -50)
+ guard_true(i1) []
+ i2 = int_le(i4, -40)
+ guard_true(i2) []
+ # here, -50 <= i4 <= -40
+
+ i5 = int_floordiv(i4, 30)
+ # here, we know that that i5 == -1 (C-style handling of negatives!)
+ escape_n(i5)
+ jump(i4)
+ """
+ expected = """
+ [i4, i5]
+ escape_n(-1)
+ jump(i4, i5)
+ """
+ self.optimize_loop(ops, expected)
+
def test_subsub_ovf(self):
ops = """
[i0]
From pypy.commits at gmail.com Sun May 8 12:36:41 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 08 May 2016 09:36:41 -0700 (PDT)
Subject: [pypy-commit] pypy default: Fix the failing test of 4b58008df717
Message-ID: <572f6b19.2472c20a.acfef.ffffa39a@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84289:d07e57c5d7f3
Date: 2016-05-08 18:36 +0200
http://bitbucket.org/pypy/pypy/changeset/d07e57c5d7f3/
Log: Fix the failing test of 4b58008df717
diff --git a/rpython/jit/metainterp/optimizeopt/intutils.py b/rpython/jit/metainterp/optimizeopt/intutils.py
--- a/rpython/jit/metainterp/optimizeopt/intutils.py
+++ b/rpython/jit/metainterp/optimizeopt/intutils.py
@@ -1,5 +1,8 @@
+import sys
from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, maxint, is_valid_int
from rpython.rlib.objectmodel import we_are_translated
+from rpython.rtyper.lltypesystem import lltype
+from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.jit.metainterp.resoperation import rop, ResOperation
from rpython.jit.metainterp.optimizeopt.info import AbstractInfo, INFO_NONNULL,\
INFO_UNKNOWN, INFO_NULL
@@ -174,15 +177,13 @@
def div_bound(self, other):
if self.has_upper and self.has_lower and \
other.has_upper and other.has_lower and \
- not other.contains(0):
- try:
- vals = (ovfcheck(self.upper / other.upper),
- ovfcheck(self.upper / other.lower),
- ovfcheck(self.lower / other.upper),
- ovfcheck(self.lower / other.lower))
- return IntBound(min4(vals), max4(vals))
- except OverflowError:
- return IntUnbounded()
+ not other.contains(0) and self.lower > (-sys.maxint-1):
+ vals = (
+ llop.int_floordiv(lltype.Signed, self.upper, other.upper),
+ llop.int_floordiv(lltype.Signed, self.upper, other.lower),
+ llop.int_floordiv(lltype.Signed, self.lower, other.upper),
+ llop.int_floordiv(lltype.Signed, self.lower, other.lower))
+ return IntBound(min4(vals), max4(vals))
else:
return IntUnbounded()
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
@@ -240,6 +240,8 @@
def test_div_bound():
+ from rpython.rtyper.lltypesystem import lltype
+ from rpython.rtyper.lltypesystem.lloperation import llop
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.div_bound(b2)
@@ -247,7 +249,8 @@
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
if n2 != 0:
- assert b3.contains(n1 / n2)
+ assert b3.contains(
+ llop.int_floordiv(lltype.Signed, n1, n2))
a=bound(2, 4).div_bound(bound(1, 2))
assert not a.contains(0)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -5546,7 +5546,7 @@
expected = """
[i4, i5]
escape_n(-1)
- jump(i4, i5)
+ jump(i4, -1)
"""
self.optimize_loop(ops, expected)
From pypy.commits at gmail.com Sun May 8 12:38:35 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 08 May 2016 09:38:35 -0700 (PDT)
Subject: [pypy-commit] cffi default: Fix tests in 'testing' to account for
e7ca388b0197 (I only fixed the
Message-ID: <572f6b8b.4374c20a.6edf0.ffff98eb@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2695:72a157dc2bde
Date: 2016-05-08 18:38 +0200
http://bitbucket.org/cffi/cffi/changeset/72a157dc2bde/
Log: Fix tests in 'testing' to account for e7ca388b0197 (I only fixed the
tests in 'c/test_c.py')
diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py
--- a/cffi/backend_ctypes.py
+++ b/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py
--- a/testing/cffi0/backend_tests.py
+++ b/testing/cffi0/backend_tests.py
@@ -55,7 +55,7 @@
max = int(max)
p = ffi.cast(c_decl, min)
assert p != min # no __eq__(int)
- assert bool(p) is True
+ assert bool(p) is bool(min)
assert int(p) == min
p = ffi.cast(c_decl, max)
assert int(p) == max
@@ -284,7 +284,9 @@
assert ffi.new("char*", b"\xff")[0] == b'\xff'
assert ffi.new("char*")[0] == b'\x00'
assert int(ffi.cast("char", 300)) == 300 - 256
- assert bool(ffi.cast("char", 0))
+ assert not bool(ffi.cast("char", 0))
+ assert bool(ffi.cast("char", 1))
+ assert bool(ffi.cast("char", 255))
py.test.raises(TypeError, ffi.new, "char*", 32)
py.test.raises(TypeError, ffi.new, "char*", u+"x")
py.test.raises(TypeError, ffi.new, "char*", b"foo")
@@ -325,7 +327,11 @@
py.test.raises(TypeError, ffi.new, "wchar_t*", u+'\U00012345')
assert ffi.new("wchar_t*")[0] == u+'\x00'
assert int(ffi.cast("wchar_t", 300)) == 300
- assert bool(ffi.cast("wchar_t", 0))
+ assert not bool(ffi.cast("wchar_t", 0))
+ assert bool(ffi.cast("wchar_t", 1))
+ assert bool(ffi.cast("wchar_t", 65535))
+ if SIZE_OF_WCHAR > 2:
+ assert bool(ffi.cast("wchar_t", 65536))
py.test.raises(TypeError, ffi.new, "wchar_t*", 32)
py.test.raises(TypeError, ffi.new, "wchar_t*", "foo")
#
diff --git a/testing/cffi1/test_new_ffi_1.py b/testing/cffi1/test_new_ffi_1.py
--- a/testing/cffi1/test_new_ffi_1.py
+++ b/testing/cffi1/test_new_ffi_1.py
@@ -138,7 +138,7 @@
max = int(max)
p = ffi.cast(c_decl, min)
assert p != min # no __eq__(int)
- assert bool(p) is True
+ assert bool(p) is bool(min)
assert int(p) == min
p = ffi.cast(c_decl, max)
assert int(p) == max
@@ -350,7 +350,9 @@
assert ffi.new("char*", b"\xff")[0] == b'\xff'
assert ffi.new("char*")[0] == b'\x00'
assert int(ffi.cast("char", 300)) == 300 - 256
- assert bool(ffi.cast("char", 0))
+ assert not bool(ffi.cast("char", 0))
+ assert bool(ffi.cast("char", 1))
+ assert bool(ffi.cast("char", 255))
py.test.raises(TypeError, ffi.new, "char*", 32)
py.test.raises(TypeError, ffi.new, "char*", u+"x")
py.test.raises(TypeError, ffi.new, "char*", b"foo")
@@ -390,7 +392,11 @@
py.test.raises(TypeError, ffi.new, "wchar_t*", u+'\U00012345')
assert ffi.new("wchar_t*")[0] == u+'\x00'
assert int(ffi.cast("wchar_t", 300)) == 300
- assert bool(ffi.cast("wchar_t", 0))
+ assert not bool(ffi.cast("wchar_t", 0))
+ assert bool(ffi.cast("wchar_t", 1))
+ assert bool(ffi.cast("wchar_t", 65535))
+ if SIZE_OF_WCHAR > 2:
+ assert bool(ffi.cast("wchar_t", 65536))
py.test.raises(TypeError, ffi.new, "wchar_t*", 32)
py.test.raises(TypeError, ffi.new, "wchar_t*", "foo")
#
From pypy.commits at gmail.com Sun May 8 12:39:36 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 08 May 2016 09:39:36 -0700 (PDT)
Subject: [pypy-commit] pypy default: import cffi/72a157dc2bde
Message-ID: <572f6bc8.8a9d1c0a.c2d50.ffff8525@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84290:df4850cd8713
Date: 2016-05-08 18:39 +0200
http://bitbucket.org/pypy/pypy/changeset/df4850cd8713/
Log: import cffi/72a157dc2bde
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -397,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
@@ -993,6 +998,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
@@ -56,7 +56,7 @@
max = int(max)
p = ffi.cast(c_decl, min)
assert p != min # no __eq__(int)
- assert bool(p) is True
+ assert bool(p) is bool(min)
assert int(p) == min
p = ffi.cast(c_decl, max)
assert int(p) == max
@@ -285,7 +285,9 @@
assert ffi.new("char*", b"\xff")[0] == b'\xff'
assert ffi.new("char*")[0] == b'\x00'
assert int(ffi.cast("char", 300)) == 300 - 256
- assert bool(ffi.cast("char", 0))
+ assert not bool(ffi.cast("char", 0))
+ assert bool(ffi.cast("char", 1))
+ assert bool(ffi.cast("char", 255))
py.test.raises(TypeError, ffi.new, "char*", 32)
py.test.raises(TypeError, ffi.new, "char*", u+"x")
py.test.raises(TypeError, ffi.new, "char*", b"foo")
@@ -326,7 +328,11 @@
py.test.raises(TypeError, ffi.new, "wchar_t*", u+'\U00012345')
assert ffi.new("wchar_t*")[0] == u+'\x00'
assert int(ffi.cast("wchar_t", 300)) == 300
- assert bool(ffi.cast("wchar_t", 0))
+ assert not bool(ffi.cast("wchar_t", 0))
+ assert bool(ffi.cast("wchar_t", 1))
+ assert bool(ffi.cast("wchar_t", 65535))
+ if SIZE_OF_WCHAR > 2:
+ assert bool(ffi.cast("wchar_t", 65536))
py.test.raises(TypeError, ffi.new, "wchar_t*", 32)
py.test.raises(TypeError, ffi.new, "wchar_t*", "foo")
#
@@ -1523,21 +1529,30 @@
import gc; gc.collect(); gc.collect(); gc.collect()
assert seen == [3]
+ def test_gc_disable(self):
+ ffi = FFI(backend=self.Backend())
+ p = ffi.new("int *", 123)
+ py.test.raises(TypeError, ffi.gc, p, None)
+ seen = []
+ q1 = ffi.gc(p, lambda p: seen.append(1))
+ q2 = ffi.gc(q1, lambda p: seen.append(2))
+ import gc; gc.collect()
+ assert seen == []
+ assert ffi.gc(q1, None) is None
+ del q1, q2
+ import gc; gc.collect(); gc.collect(); gc.collect()
+ assert seen == [2]
+
def test_gc_finite_list(self):
ffi = FFI(backend=self.Backend())
- public = not hasattr(ffi._backend, 'gcp')
p = ffi.new("int *", 123)
keepalive = []
for i in range(10):
keepalive.append(ffi.gc(p, lambda p: None))
- if public:
- assert len(ffi.gc_weakrefs.data) == i + 1
del keepalive[:]
import gc; gc.collect(); gc.collect()
for i in range(10):
keepalive.append(ffi.gc(p, lambda p: None))
- if public:
- assert len(ffi.gc_weakrefs.data) == 10
def test_CData_CType(self):
ffi = FFI(backend=self.Backend())
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
@@ -467,12 +467,12 @@
def test_introspect_order(self):
ffi = FFI()
- ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;")
- ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;")
- ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;")
- assert ffi.list_types() == (['b', 'bb', 'bbb'],
- ['a', 'cc', 'ccc'],
- ['aa', 'aaa', 'g'])
+ ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;")
+ ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;")
+ ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;")
+ assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'],
+ ['CFFIa', 'CFFIcc', 'CFFIccc'],
+ ['CFFIaa', 'CFFIaaa', 'CFFIg'])
def test_unpack(self):
ffi = FFI()
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
@@ -139,7 +139,7 @@
max = int(max)
p = ffi.cast(c_decl, min)
assert p != min # no __eq__(int)
- assert bool(p) is True
+ assert bool(p) is bool(min)
assert int(p) == min
p = ffi.cast(c_decl, max)
assert int(p) == max
@@ -351,7 +351,9 @@
assert ffi.new("char*", b"\xff")[0] == b'\xff'
assert ffi.new("char*")[0] == b'\x00'
assert int(ffi.cast("char", 300)) == 300 - 256
- assert bool(ffi.cast("char", 0))
+ assert not bool(ffi.cast("char", 0))
+ assert bool(ffi.cast("char", 1))
+ assert bool(ffi.cast("char", 255))
py.test.raises(TypeError, ffi.new, "char*", 32)
py.test.raises(TypeError, ffi.new, "char*", u+"x")
py.test.raises(TypeError, ffi.new, "char*", b"foo")
@@ -391,7 +393,11 @@
py.test.raises(TypeError, ffi.new, "wchar_t*", u+'\U00012345')
assert ffi.new("wchar_t*")[0] == u+'\x00'
assert int(ffi.cast("wchar_t", 300)) == 300
- assert bool(ffi.cast("wchar_t", 0))
+ assert not bool(ffi.cast("wchar_t", 0))
+ assert bool(ffi.cast("wchar_t", 1))
+ assert bool(ffi.cast("wchar_t", 65535))
+ if SIZE_OF_WCHAR > 2:
+ assert bool(ffi.cast("wchar_t", 65536))
py.test.raises(TypeError, ffi.new, "wchar_t*", 32)
py.test.raises(TypeError, ffi.new, "wchar_t*", "foo")
#
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -1898,14 +1898,14 @@
def test_introspect_order():
ffi = FFI()
- ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;")
- ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;")
- ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;")
+ ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;")
+ ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;")
+ ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;")
verify(ffi, "test_introspect_order", """
- union aaa { int a; }; typedef struct ccc { int a; } b;
- union g { int a; }; typedef struct cc { int a; } bbb;
- union aa { int a; }; typedef struct a { int a; } bb;
+ union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;
+ union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;
+ union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;
""")
- assert ffi.list_types() == (['b', 'bb', 'bbb'],
- ['a', 'cc', 'ccc'],
- ['aa', 'aaa', 'g'])
+ assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'],
+ ['CFFIa', 'CFFIcc', 'CFFIccc'],
+ ['CFFIaa', 'CFFIaaa', 'CFFIg'])
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
@@ -280,6 +280,14 @@
pass
with open("setup.py", "w") as f:
f.write("""if 1:
+ # https://bugs.python.org/issue23246
+ import sys
+ if sys.platform == 'win32':
+ try:
+ import setuptools
+ except ImportError:
+ pass
+
import cffi
ffi = cffi.FFI()
ffi.set_source("pack1.mymod", "/*code would be here*/")
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
@@ -80,8 +80,21 @@
# find a solution to that: we could hack sys.path inside the
# script run here, but we can't hack it in the same way in
# execute().
- output = self._run([sys.executable,
- os.path.join(local_dir, filename)])
+ pathname = os.path.join(path, filename)
+ with open(pathname, 'w') as g:
+ g.write('''
+# https://bugs.python.org/issue23246
+import sys
+if sys.platform == 'win32':
+ try:
+ import setuptools
+ except ImportError:
+ pass
+''')
+ with open(os.path.join(local_dir, filename), 'r') as f:
+ g.write(f.read())
+
+ output = self._run([sys.executable, pathname])
match = re.compile(r"\bFILENAME: (.+)").search(output)
assert match
dynamic_lib_name = match.group(1)
diff --git a/pypy/module/test_lib_pypy/cffi_tests/udir.py b/pypy/module/test_lib_pypy/cffi_tests/udir.py
--- a/pypy/module/test_lib_pypy/cffi_tests/udir.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/udir.py
@@ -1,4 +1,14 @@
# Generated by pypy/tool/import_cffi.py
import py
+import sys
udir = py.path.local.make_numbered_dir(prefix = 'ffi-')
+
+
+# Windows-only workaround for some configurations: see
+# https://bugs.python.org/issue23246 (Python 2.7.9)
+if sys.platform == 'win32':
+ try:
+ import setuptools
+ except ImportError:
+ pass
From pypy.commits at gmail.com Sun May 8 12:59:17 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 08 May 2016 09:59:17 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: fix fix fix
Message-ID: <572f7065.d2711c0a.9e252.ffff87a3@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84291:f69d3f2271bd
Date: 2016-05-08 18:59 +0200
http://bitbucket.org/pypy/pypy/changeset/f69d3f2271bd/
Log: fix fix fix
diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py
--- a/rpython/jit/metainterp/optimizeopt/dependency.py
+++ b/rpython/jit/metainterp/optimizeopt/dependency.py
@@ -930,7 +930,7 @@
exec py.code.Source(multiplicative_func_source
.format(name='INT_MUL', op='*', tgt='mul', cop='*')).compile()
exec py.code.Source(multiplicative_func_source
- .format(name='INT_FLOORDIV', op='*', tgt='div', cop='/')).compile()
+ .format(name='INT_PY_DIV', op='*', tgt='div', cop='/')).compile()
exec py.code.Source(multiplicative_func_source
.format(name='UINT_FLOORDIV', op='*', tgt='div', cop='/')).compile()
del multiplicative_func_source
diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py
--- a/rpython/jit/metainterp/optimizeopt/intbounds.py
+++ b/rpython/jit/metainterp/optimizeopt/intbounds.py
@@ -172,22 +172,21 @@
if b.bounded():
r.intersect(b)
- def optimize_INT_FLOORDIV(self, op):
+ def optimize_INT_PY_DIV(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
self.emit_operation(op)
r = self.getintbound(op)
- r.intersect(b1.div_bound(b2))
+ r.intersect(b1.py_div_bound(b2))
- def optimize_INT_MOD(self, op):
+ def optimize_INT_PY_MOD(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
- known_nonneg = (b1.known_ge(IntBound(0, 0)) and
- b2.known_ge(IntBound(0, 0)))
- if known_nonneg and b2.is_constant():
+ if b2.is_constant():
val = b2.getint()
- if (val & (val-1)) == 0:
- # nonneg % power-of-two ==> nonneg & (power-of-two - 1)
+ if val > 0 and (val & (val-1)) == 0:
+ # x % power-of-two ==> x & (power-of-two - 1)
+ # with Python's modulo, this is valid even if 'x' is negative.
arg1 = op.getarg(0)
arg2 = ConstInt(val-1)
op = self.replace_op_with(op, rop.INT_AND,
@@ -196,15 +195,12 @@
if b2.is_constant():
val = b2.getint()
r = self.getintbound(op)
- if val < 0:
- if val == -sys.maxint-1:
- return # give up
- val = -val
- if known_nonneg:
+ if val >= 0: # with Python's modulo: 0 <= (x % pos) < pos
r.make_ge(IntBound(0, 0))
- else:
- r.make_gt(IntBound(-val, -val))
- r.make_lt(IntBound(val, val))
+ r.make_lt(IntBound(val, val))
+ else: # with Python's modulo: neg < (x % neg) <= 0
+ r.make_gt(IntBound(val, val))
+ r.make_le(IntBound(0, 0))
def optimize_INT_LSHIFT(self, op):
arg0 = self.get_box_replacement(op.getarg(0))
@@ -613,10 +609,10 @@
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
r = self.getintbound(op)
- b = r.div_bound(b2)
+ b = r.py_div_bound(b2)
if b1.intersect(b):
self.propagate_bounds_backward(op.getarg(0))
- b = r.div_bound(b1)
+ b = r.py_div_bound(b1)
if b2.intersect(b):
self.propagate_bounds_backward(op.getarg(1))
diff --git a/rpython/jit/metainterp/optimizeopt/intutils.py b/rpython/jit/metainterp/optimizeopt/intutils.py
--- a/rpython/jit/metainterp/optimizeopt/intutils.py
+++ b/rpython/jit/metainterp/optimizeopt/intutils.py
@@ -171,11 +171,14 @@
else:
return IntUnbounded()
- def div_bound(self, other):
+ def py_div_bound(self, other):
if self.has_upper and self.has_lower and \
other.has_upper and other.has_lower and \
not other.contains(0):
try:
+ # this gives the bounds for 'int_py_div', so use the
+ # Python-style handling of negative numbers and not
+ # the C-style one
vals = (ovfcheck(self.upper / other.upper),
ovfcheck(self.upper / other.lower),
ovfcheck(self.lower / other.upper),
diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py
--- a/rpython/jit/metainterp/optimizeopt/rewrite.py
+++ b/rpython/jit/metainterp/optimizeopt/rewrite.py
@@ -678,21 +678,23 @@
def optimize_GUARD_FUTURE_CONDITION(self, op):
self.optimizer.notice_guard_future_condition(op)
- def optimize_INT_FLOORDIV(self, op):
+ def optimize_INT_PY_DIV(self, op):
arg0 = op.getarg(0)
b1 = self.getintbound(arg0)
arg1 = op.getarg(1)
b2 = self.getintbound(arg1)
- if b2.is_constant() and b2.getint() == 1:
- self.make_equal_to(op, arg0)
- return
- elif b1.is_constant() and b1.getint() == 0:
+ if b1.is_constant() and b1.getint() == 0:
self.make_constant_int(op, 0)
return
- if b1.known_ge(IntBound(0, 0)) and b2.is_constant():
+ # This is Python's integer division: 'x // (2**shift)' can always
+ # be replaced with 'x >> shift', even for negative values of x
+ if b2.is_constant():
val = b2.getint()
- if val & (val - 1) == 0 and val > 0: # val == 2**shift
+ if val == 1:
+ self.make_equal_to(op, arg0)
+ return
+ elif val > 0 and val & (val - 1) == 0: # val == 2**shift
op = self.replace_op_with(op, rop.INT_RSHIFT,
args = [op.getarg(0), ConstInt(highest_bit(val))])
self.emit_operation(op)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
@@ -242,18 +242,18 @@
def test_div_bound():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
- b3 = b1.div_bound(b2)
+ b3 = b1.py_div_bound(b2)
for n1 in nbr:
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
if n2 != 0:
- assert b3.contains(n1 / n2)
+ assert b3.contains(n1 / n2) # Python-style div
- a=bound(2, 4).div_bound(bound(1, 2))
+ a=bound(2, 4).py_div_bound(bound(1, 2))
assert not a.contains(0)
assert not a.contains(5)
- a=bound(-3, 2).div_bound(bound(1, 2))
+ a=bound(-3, 2).py_div_bound(bound(1, 2))
assert not a.contains(-4)
assert not a.contains(3)
assert a.contains(-3)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -1849,7 +1849,7 @@
ops = """
[i0]
- i1 = int_floordiv(0, i0)
+ i1 = int_py_div(0, i0)
jump(i1)
"""
expected = """
@@ -4645,94 +4645,63 @@
def test_intmod_bounds(self):
ops = """
[i0, i1]
- i2 = int_mod(i0, 12)
- i3 = int_gt(i2, 12)
+ i2 = int_py_mod(i0, 12)
+ i3 = int_ge(i2, 12)
guard_false(i3) []
- i4 = int_lt(i2, -12)
+ i4 = int_lt(i2, 0)
guard_false(i4) []
- i5 = int_mod(i1, -12)
- i6 = int_lt(i5, -12)
+ i5 = int_py_mod(i1, -12)
+ i6 = int_le(i5, -12)
guard_false(i6) []
- i7 = int_gt(i5, 12)
+ i7 = int_gt(i5, 0)
guard_false(i7) []
jump(i2, i5)
"""
expected = """
[i0, i1]
- i2 = int_mod(i0, 12)
- i5 = int_mod(i1, -12)
+ i2 = int_py_mod(i0, 12)
+ i5 = int_py_mod(i1, -12)
jump(i2, i5)
"""
self.optimize_loop(ops, expected)
- # This the sequence of resoperations that is generated for a Python
- # app-level int % int. When the modulus is constant and when i0
- # is known non-negative it should be optimized to a single int_mod.
+ # same as above, but all guards are shifted by one so that they
+ # must stay
+ ops = """
+ [i8, i9]
+ i0 = escape_i()
+ i2 = int_py_mod(i0, 12)
+ i3 = int_ge(i2, 11)
+ guard_false(i3) []
+ i4 = int_lt(i2, 1)
+ guard_false(i4) []
+ i1 = escape_i()
+ i5 = int_py_mod(i1, -12)
+ i6 = int_le(i5, -11)
+ guard_false(i6) []
+ i7 = int_gt(i5, -1)
+ guard_false(i7) []
+ jump(i2, i5)
+ """
+ self.optimize_loop(ops, ops)
+
+ # 'n % power-of-two' can always be turned into int_and()
ops = """
[i0]
- i5 = int_ge(i0, 0)
- guard_true(i5) []
- i1 = int_mod(i0, 42)
- i2 = int_rshift(i1, %d)
- i3 = int_and(42, i2)
- i4 = int_add(i1, i3)
- finish(i4)
- """ % (LONG_BIT-1)
+ i1 = int_py_mod(i0, 8)
+ finish(i1)
+ """
expected = """
[i0]
- i5 = int_ge(i0, 0)
- guard_true(i5) []
- i1 = int_mod(i0, 42)
- finish(i1)
- """
- self.optimize_loop(ops, expected)
-
- # 'n % power-of-two' can be turned into int_and(); at least that's
- # easy to do now if n is known to be non-negative.
- ops = """
- [i0]
- i5 = int_ge(i0, 0)
- guard_true(i5) []
- i1 = int_mod(i0, 8)
- i2 = int_rshift(i1, %d)
- i3 = int_and(42, i2)
- i4 = int_add(i1, i3)
- finish(i4)
- """ % (LONG_BIT-1)
- expected = """
- [i0]
- i5 = int_ge(i0, 0)
- guard_true(i5) []
i1 = int_and(i0, 7)
finish(i1)
"""
self.optimize_loop(ops, expected)
- def test_intmod_bounds_harder(self):
- py.test.skip("harder")
- # Of course any 'maybe-negative % power-of-two' can be turned into
- # int_and(), but that's a bit harder to detect here because it turns
- # into several operations, and of course it is wrong to just turn
- # int_mod(i0, 16) into int_and(i0, 15).
+ def test_intmod_bounds_bug1(self):
ops = """
[i0]
- i1 = int_mod(i0, 16)
- i2 = int_rshift(i1, %d)
- i3 = int_and(16, i2)
- i4 = int_add(i1, i3)
- finish(i4)
- """ % (LONG_BIT-1)
- expected = """
- [i0]
- i4 = int_and(i0, 15)
- finish(i4)
- """
- self.optimize_loop(ops, expected)
-
- def test_intmod_bounds_bug1(self):
- ops = """
- [i0]
- i1 = int_mod(i0, %d)
+ i1 = int_py_mod(i0, %d)
i2 = int_eq(i1, 0)
guard_false(i2) []
finish()
From pypy.commits at gmail.com Sun May 8 13:13:23 2016
From: pypy.commits at gmail.com (vincentlegoll)
Date: Sun, 08 May 2016 10:13:23 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: new branch
Message-ID: <572f73b3.21f9c20a.d72fa.ffffb14d@mx.google.com>
Author: Vincent Legoll
Branch: cleanups
Changeset: r84292:c1655829c9d4
Date: 2016-05-08 17:44 +0200
http://bitbucket.org/pypy/pypy/changeset/c1655829c9d4/
Log: new branch
From pypy.commits at gmail.com Sun May 8 13:13:25 2016
From: pypy.commits at gmail.com (vincentlegoll)
Date: Sun, 08 May 2016 10:13:25 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: Remove duplicated code (probably
mismerged)
Message-ID: <572f73b5.442cc20a.f07ae.ffffb075@mx.google.com>
Author: Vincent Legoll
Branch: cleanups
Changeset: r84293:0f65b92fc764
Date: 2016-05-08 17:49 +0200
http://bitbucket.org/pypy/pypy/changeset/0f65b92fc764/
Log: Remove duplicated code (probably mismerged)
diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py
--- a/pypy/module/micronumpy/loop.py
+++ b/pypy/module/micronumpy/loop.py
@@ -806,7 +806,6 @@
indexlen = len(indexes_w)
dtype = arr.get_dtype()
iter = PureShapeIter(iter_shape, indexes_w)
- indexlen = len(indexes_w)
while not iter.done():
getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen,
dtype=dtype, prefixlen=prefixlen)
From pypy.commits at gmail.com Sun May 8 13:13:27 2016
From: pypy.commits at gmail.com (vincentlegoll)
Date: Sun, 08 May 2016 10:13:27 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: Remove unused variable
Message-ID: <572f73b7.4ac0c20a.3edef.fffff7a1@mx.google.com>
Author: Vincent Legoll
Branch: cleanups
Changeset: r84294:d9c488803f99
Date: 2016-05-08 17:52 +0200
http://bitbucket.org/pypy/pypy/changeset/d9c488803f99/
Log: Remove unused variable
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -258,7 +258,6 @@
elif space.is_w(w_idx, space.w_None):
return [NewAxisChunk(), EllipsisChunk()]
result = []
- i = 0
has_ellipsis = False
has_filter = False
for w_item in space.fixedview(w_idx):
@@ -274,7 +273,6 @@
result.append(NewAxisChunk())
elif space.isinstance_w(w_item, space.w_slice):
result.append(SliceChunk(w_item))
- i += 1
elif isinstance(w_item, W_NDimArray) and w_item.get_dtype().is_bool():
if has_filter:
# in CNumPy, the support for this is incomplete
@@ -287,7 +285,6 @@
result.append(IntegerChunk(w_item.descr_int(space)))
else:
result.append(IntegerChunk(w_item))
- i += 1
if not has_ellipsis:
result.append(EllipsisChunk())
return result
From pypy.commits at gmail.com Sun May 8 13:13:28 2016
From: pypy.commits at gmail.com (vincentlegoll)
Date: Sun, 08 May 2016 10:13:28 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: typo: missing char in comment
Message-ID: <572f73b8.875a1c0a.49d7a.ffff89e0@mx.google.com>
Author: Vincent Legoll
Branch: cleanups
Changeset: r84295:72449152cb72
Date: 2016-05-08 17:53 +0200
http://bitbucket.org/pypy/pypy/changeset/72449152cb72/
Log: typo: missing char in comment
diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py
--- a/pypy/module/micronumpy/loop.py
+++ b/pypy/module/micronumpy/loop.py
@@ -199,7 +199,7 @@
reds='auto')
def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args):
- # out must hav been built. func needs no calc_type, is usually an
+ # out must have been built. func needs no calc_type, is usually an
# external ufunc
nin = len(in_args)
in_iters = [None] * nin
From pypy.commits at gmail.com Sun May 8 13:13:30 2016
From: pypy.commits at gmail.com (vincentlegoll)
Date: Sun, 08 May 2016 10:13:30 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: Remove useless "pass" instruction
Message-ID: <572f73ba.876cc20a.1c4cc.ffffa3ed@mx.google.com>
Author: Vincent Legoll
Branch: cleanups
Changeset: r84296:3ac2d0590033
Date: 2016-05-08 17:56 +0200
http://bitbucket.org/pypy/pypy/changeset/3ac2d0590033/
Log: Remove useless "pass" instruction
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -8,7 +8,6 @@
class BaseChunk(object):
_attrs_ = ['step','out_dim']
- pass
class Chunk(BaseChunk):
From pypy.commits at gmail.com Sun May 8 13:13:32 2016
From: pypy.commits at gmail.com (vincentlegoll)
Date: Sun, 08 May 2016 10:13:32 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: Whitespace fixes
Message-ID: <572f73bc.10691c0a.62ac.ffff91e9@mx.google.com>
Author: Vincent Legoll
Branch: cleanups
Changeset: r84297:0e8d993970af
Date: 2016-05-08 17:57 +0200
http://bitbucket.org/pypy/pypy/changeset/0e8d993970af/
Log: Whitespace fixes
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -7,7 +7,7 @@
# structures to describe slicing
class BaseChunk(object):
- _attrs_ = ['step','out_dim']
+ _attrs_ = ['step', 'out_dim']
class Chunk(BaseChunk):
@@ -35,6 +35,7 @@
class IntegerChunk(BaseChunk):
input_dim = 1
out_dim = 0
+
def __init__(self, w_idx):
self.w_idx = w_idx
@@ -69,6 +70,7 @@
class EllipsisChunk(BaseChunk):
input_dim = 0
out_dim = 0
+
def __init__(self):
pass
@@ -79,6 +81,7 @@
class BooleanChunk(BaseChunk):
input_dim = 1
out_dim = 1
+
def __init__(self, w_idx):
self.w_idx = w_idx
From pypy.commits at gmail.com Sun May 8 13:13:37 2016
From: pypy.commits at gmail.com (vincentlegoll)
Date: Sun, 08 May 2016 10:13:37 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: Whitespace fix: space at EOL
Message-ID: <572f73c1.8344c20a.2d101.ffffb046@mx.google.com>
Author: Vincent Legoll
Branch: cleanups
Changeset: r84300:ae4e92b896c0
Date: 2016-05-08 18:08 +0200
http://bitbucket.org/pypy/pypy/changeset/ae4e92b896c0/
Log: Whitespace fix: space at EOL
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -231,11 +231,11 @@
dim = i
idx = c.w_idx
chunks.pop(i)
- chunks.insert(0, SliceChunk(space.newslice(space.wrap(0),
+ chunks.insert(0, SliceChunk(space.newslice(space.wrap(0),
space.w_None, space.w_None)))
break
if dim > 0:
- view = self.implementation.swapaxes(space, self, 0, dim)
+ view = self.implementation.swapaxes(space, self, 0, dim)
if dim >= 0:
view = new_view(space, self, chunks)
view.setitem_filter(space, idx, val_arr)
@@ -563,7 +563,7 @@
l_w = []
for i in range(self.get_shape()[0]):
item_w = self.descr_getitem(space, space.wrap(i))
- if (isinstance(item_w, W_NDimArray) or
+ if (isinstance(item_w, W_NDimArray) or
isinstance(item_w, boxes.W_GenericBox)):
l_w.append(space.call_method(item_w, "tolist"))
else:
@@ -740,7 +740,7 @@
space.str_w(self.get_dtype().descr_repr(space)),
space.str_w(new_dtype.descr_repr(space)), casting)
order = order_converter(space, space.wrap(order), self.get_order())
- if (not copy and new_dtype == self.get_dtype()
+ if (not copy and new_dtype == self.get_dtype()
and (order in (NPY.KEEPORDER, NPY.ANYORDER) or order == self.get_order())
and (subok or type(self) is W_NDimArray)):
return self
From pypy.commits at gmail.com Sun May 8 13:13:39 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 10:13:39 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: Backed out changeset: 7bdeb693d88b -
needed in order to change init() syntax for this class
Message-ID: <572f73c3.4ea81c0a.2c7ec.ffff9518@mx.google.com>
Author: Matti Picus
Branch: cleanups
Changeset: r84301:a42fff844fd3
Date: 2016-05-08 19:52 +0300
http://bitbucket.org/pypy/pypy/changeset/a42fff844fd3/
Log: Backed out changeset: 7bdeb693d88b - needed in order to change
init() syntax for this class
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -61,6 +61,9 @@
input_dim = 0
out_dim = 1
+ def __init__(self):
+ pass
+
def compute(self, space, base_length, base_stride):
return 0, 1, 0, 0
From pypy.commits at gmail.com Sun May 8 13:13:34 2016
From: pypy.commits at gmail.com (vincentlegoll)
Date: Sun, 08 May 2016 10:13:34 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: Remove useless __init__() method
Message-ID: <572f73be.2472c20a.acfef.ffffb079@mx.google.com>
Author: Vincent Legoll
Branch: cleanups
Changeset: r84298:7bdeb693d88b
Date: 2016-05-08 18:03 +0200
http://bitbucket.org/pypy/pypy/changeset/7bdeb693d88b/
Log: Remove useless __init__() method
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -61,9 +61,6 @@
input_dim = 0
out_dim = 1
- def __init__(self):
- pass
-
def compute(self, space, base_length, base_stride):
return 0, 1, 0, 0
From pypy.commits at gmail.com Sun May 8 13:13:41 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 10:13:41 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: close branch to be merged
Message-ID: <572f73c5.e7bec20a.a4e5e.ffffa495@mx.google.com>
Author: Matti Picus
Branch: cleanups
Changeset: r84302:325008782c89
Date: 2016-05-08 20:10 +0300
http://bitbucket.org/pypy/pypy/changeset/325008782c89/
Log: close branch to be merged
From pypy.commits at gmail.com Sun May 8 13:13:35 2016
From: pypy.commits at gmail.com (vincentlegoll)
Date: Sun, 08 May 2016 10:13:35 -0700 (PDT)
Subject: [pypy-commit] pypy cleanups: Remove unused import
Message-ID: <572f73bf.634fc20a.fbf4e.ffffa189@mx.google.com>
Author: Vincent Legoll
Branch: cleanups
Changeset: r84299:143b90507ad8
Date: 2016-05-08 18:05 +0200
http://bitbucket.org/pypy/pypy/changeset/143b90507ad8/
Log: Remove unused import
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -1,6 +1,6 @@
from pypy.interpreter.error import oefmt
from rpython.rlib import jit
-from pypy.module.micronumpy import support, constants as NPY
+from pypy.module.micronumpy import constants as NPY
from pypy.module.micronumpy.base import W_NDimArray
From pypy.commits at gmail.com Sun May 8 13:13:43 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 10:13:43 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge small cleanups branch into default
Message-ID: <572f73c7.c486c20a.aa9a4.ffffa47d@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84303:b645c9d24033
Date: 2016-05-08 20:11 +0300
http://bitbucket.org/pypy/pypy/changeset/b645c9d24033/
Log: merge small cleanups branch into default
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -258,7 +258,6 @@
elif space.is_w(w_idx, space.w_None):
return [NewAxisChunk(), EllipsisChunk()]
result = []
- i = 0
has_ellipsis = False
has_filter = False
for w_item in space.fixedview(w_idx):
@@ -274,7 +273,6 @@
result.append(NewAxisChunk())
elif space.isinstance_w(w_item, space.w_slice):
result.append(SliceChunk(w_item))
- i += 1
elif isinstance(w_item, W_NDimArray) and w_item.get_dtype().is_bool():
if has_filter:
# in CNumPy, the support for this is incomplete
@@ -287,7 +285,6 @@
result.append(IntegerChunk(w_item.descr_int(space)))
else:
result.append(IntegerChunk(w_item))
- i += 1
if not has_ellipsis:
result.append(EllipsisChunk())
return result
diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py
--- a/pypy/module/micronumpy/loop.py
+++ b/pypy/module/micronumpy/loop.py
@@ -199,7 +199,7 @@
reds='auto')
def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args):
- # out must hav been built. func needs no calc_type, is usually an
+ # out must have been built. func needs no calc_type, is usually an
# external ufunc
nin = len(in_args)
in_iters = [None] * nin
@@ -806,7 +806,6 @@
indexlen = len(indexes_w)
dtype = arr.get_dtype()
iter = PureShapeIter(iter_shape, indexes_w)
- indexlen = len(indexes_w)
while not iter.done():
getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen,
dtype=dtype, prefixlen=prefixlen)
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -231,11 +231,11 @@
dim = i
idx = c.w_idx
chunks.pop(i)
- chunks.insert(0, SliceChunk(space.newslice(space.wrap(0),
+ chunks.insert(0, SliceChunk(space.newslice(space.wrap(0),
space.w_None, space.w_None)))
break
if dim > 0:
- view = self.implementation.swapaxes(space, self, 0, dim)
+ view = self.implementation.swapaxes(space, self, 0, dim)
if dim >= 0:
view = new_view(space, self, chunks)
view.setitem_filter(space, idx, val_arr)
@@ -563,7 +563,7 @@
l_w = []
for i in range(self.get_shape()[0]):
item_w = self.descr_getitem(space, space.wrap(i))
- if (isinstance(item_w, W_NDimArray) or
+ if (isinstance(item_w, W_NDimArray) or
isinstance(item_w, boxes.W_GenericBox)):
l_w.append(space.call_method(item_w, "tolist"))
else:
@@ -740,7 +740,7 @@
space.str_w(self.get_dtype().descr_repr(space)),
space.str_w(new_dtype.descr_repr(space)), casting)
order = order_converter(space, space.wrap(order), self.get_order())
- if (not copy and new_dtype == self.get_dtype()
+ if (not copy and new_dtype == self.get_dtype()
and (order in (NPY.KEEPORDER, NPY.ANYORDER) or order == self.get_order())
and (subok or type(self) is W_NDimArray)):
return self
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -1,14 +1,13 @@
from pypy.interpreter.error import oefmt
from rpython.rlib import jit
-from pypy.module.micronumpy import support, constants as NPY
+from pypy.module.micronumpy import constants as NPY
from pypy.module.micronumpy.base import W_NDimArray
# structures to describe slicing
class BaseChunk(object):
- _attrs_ = ['step','out_dim']
- pass
+ _attrs_ = ['step', 'out_dim']
class Chunk(BaseChunk):
@@ -36,6 +35,7 @@
class IntegerChunk(BaseChunk):
input_dim = 1
out_dim = 0
+
def __init__(self, w_idx):
self.w_idx = w_idx
@@ -70,6 +70,7 @@
class EllipsisChunk(BaseChunk):
input_dim = 0
out_dim = 0
+
def __init__(self):
pass
@@ -80,6 +81,7 @@
class BooleanChunk(BaseChunk):
input_dim = 1
out_dim = 1
+
def __init__(self, w_idx):
self.w_idx = w_idx
From pypy.commits at gmail.com Sun May 8 13:15:48 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 10:15:48 -0700 (PDT)
Subject: [pypy-commit] pypy default: document merged branch
Message-ID: <572f7444.838e1c0a.8d9e0.ffff8b4b@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84304:bff74066a4e2
Date: 2016-05-08 20:15 +0300
http://bitbucket.org/pypy/pypy/changeset/bff74066a4e2/
Log: document merged branch
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -79,3 +79,6 @@
It is a more flexible way to make RPython finalizers.
.. branch: unpacking-cpython-shortcut
+
+.. branch: cleanups
+
From pypy.commits at gmail.com Sun May 8 14:26:30 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 08 May 2016 11:26:30 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: kill test_immutabledoc
Message-ID: <572f84d6.c61ec20a.b18a4.ffffc9da@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84305:95430613866d
Date: 2016-05-08 11:25 -0700
http://bitbucket.org/pypy/pypy/changeset/95430613866d/
Log: kill test_immutabledoc
diff --git a/pypy/interpreter/test/test_class.py b/pypy/interpreter/test/test_class.py
--- a/pypy/interpreter/test/test_class.py
+++ b/pypy/interpreter/test/test_class.py
@@ -123,14 +123,3 @@
assert C.__qualname__ == 'test_qualname..C'
assert C.D.__qualname__ == 'test_qualname..C.D'
assert not hasattr(C(), '__qualname__')
-
- def test_set_doc(self):
- class X:
- "elephant"
- X.__doc__ = "banana"
- assert X.__doc__ == "banana"
- raises(TypeError, lambda:
- type(list).__dict__["__doc__"].__set__(list, "blah"))
- raises((AttributeError, TypeError), lambda:
- type(X).__dict__["__doc__"].__delete__(X))
- assert X.__doc__ == "banana"
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -500,22 +500,16 @@
assert ImplicitDoc.__doc__ == 'foo'
- def test_immutabledoc(self):
- class ImmutableDoc(object):
- "foo"
-
- try:
- ImmutableDoc.__doc__ = "bar"
- except TypeError:
- pass
- except AttributeError:
- # XXX - Python raises TypeError for several descriptors,
- # we always raise AttributeError.
- pass
- else:
- raise AssertionError('__doc__ should not be writable')
-
- assert ImmutableDoc.__doc__ == 'foo'
+ def test_set_doc(self):
+ class X:
+ "elephant"
+ X.__doc__ = "banana"
+ assert X.__doc__ == "banana"
+ raises(TypeError, lambda:
+ type(list).__dict__["__doc__"].__set__(list, "blah"))
+ raises((AttributeError, TypeError), lambda:
+ type(X).__dict__["__doc__"].__delete__(X))
+ assert X.__doc__ == "banana"
def test_metaclass_conflict(self):
"""
From pypy.commits at gmail.com Sun May 8 14:33:01 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 08 May 2016 11:33:01 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: fix after merge
Message-ID: <572f865d.c486c20a.aa9a4.ffffbe37@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84306:7832fcede2fc
Date: 2016-05-08 11:32 -0700
http://bitbucket.org/pypy/pypy/changeset/7832fcede2fc/
Log: fix after merge
diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py
--- a/pypy/module/cpyext/test/test_version.py
+++ b/pypy/module/cpyext/test/test_version.py
@@ -1,4 +1,6 @@
-import py
+import sys
+
+import py, pytest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
From pypy.commits at gmail.com Sun May 8 15:00:50 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Sun, 08 May 2016 12:00:50 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: kill SSLContext.check_hostname,
it's for 3.4 (or modern 2.9) ssl.py. exposing
Message-ID: <572f8ce2.634fc20a.fbf4e.ffffc442@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84307:3be2e22ef987
Date: 2016-05-08 11:59 -0700
http://bitbucket.org/pypy/pypy/changeset/3be2e22ef987/
Log: kill SSLContext.check_hostname, it's for 3.4 (or modern 2.9) ssl.py.
exposing it gives the impression that we provide a 3.4 ssl.py that
uses it to do ssl hostname matching in do_handshake, e.g.:
https://github.com/python/asyncio/blob/309a218/asyncio/selector_even
ts.py#L828
(without this change, this code never matches hostnames!)
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -1785,8 +1785,9 @@
SSLContext.descr_set_verify_mode),
verify_flags=GetSetProperty(SSLContext.descr_get_verify_flags,
SSLContext.descr_set_verify_flags),
- check_hostname=GetSetProperty(SSLContext.descr_get_check_hostname,
- SSLContext.descr_set_check_hostname),
+ # XXX: For use by 3.4 ssl.py only
+ #check_hostname=GetSetProperty(SSLContext.descr_get_check_hostname,
+ # SSLContext.descr_set_check_hostname),
)
diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py
--- a/pypy/module/_ssl/test/test_ssl.py
+++ b/pypy/module/_ssl/test/test_ssl.py
@@ -105,7 +105,8 @@
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
def test_context(self):
- import _ssl
+ import _ssl, sys
+ py33 = sys.version_info[:2] == (3, 3)
s = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1)
raises(ValueError, _ssl._SSLContext, -1)
@@ -115,10 +116,13 @@
assert not s.options & _ssl.OP_NO_SSLv2
raises(TypeError, "s.options = 2.5")
- assert not s.check_hostname
- exc = raises(ValueError, "s.check_hostname = True")
- assert str(exc.value) == "check_hostname needs a SSL context with " \
- "either CERT_OPTIONAL or CERT_REQUIRED"
+ if py33:
+ assert not hasattr(s, 'check_hostname')
+ else:
+ assert not s.check_hostname
+ exc = raises(ValueError, "s.check_hostname = True")
+ assert str(exc.value) == "check_hostname needs a SSL context " \
+ "with either CERT_OPTIONAL or CERT_REQUIRED"
assert s.verify_mode == _ssl.CERT_NONE
s.verify_mode = _ssl.CERT_REQUIRED
@@ -133,12 +137,13 @@
s.verify_flags = _ssl.VERIFY_DEFAULT
assert s.verify_flags == _ssl.VERIFY_DEFAULT
- s.check_hostname = True
- assert s.check_hostname
+ if not py33:
+ s.check_hostname = True
+ assert s.check_hostname
- exc = raises(ValueError, "s.verify_mode = _ssl.CERT_NONE")
- assert str(exc.value) == "Cannot set verify_mode to CERT_NONE " \
- "when check_hostname is enabled."
+ exc = raises(ValueError, "s.verify_mode = _ssl.CERT_NONE")
+ assert str(exc.value) == "Cannot set verify_mode to CERT_NONE " \
+ "when check_hostname is enabled."
def test_set_default_verify_paths(self):
import _ssl
From pypy.commits at gmail.com Sun May 8 15:32:30 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 12:32:30 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-more-slots: inherit slot functions from
base, bases types
Message-ID: <572f944e.4ca51c0a.f2226.ffffc48f@mx.google.com>
Author: Matti Picus
Branch: cpyext-more-slots
Changeset: r84308:a825b9c50a12
Date: 2016-05-07 21:51 +0300
http://bitbucket.org/pypy/pypy/changeset/a825b9c50a12/
Log: inherit slot functions from base, bases types
From pypy.commits at gmail.com Sun May 8 15:32:32 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 12:32:32 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-more-slots: simplify large switch for
unary functions
Message-ID: <572f9450.c8eac20a.f98cc.ffffdfd0@mx.google.com>
Author: Matti Picus
Branch: cpyext-more-slots
Changeset: r84309:efc2f2833423
Date: 2016-05-07 21:52 +0300
http://bitbucket.org/pypy/pypy/changeset/efc2f2833423/
Log: simplify large switch for unary functions
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -374,7 +374,29 @@
header = pypy_decl
if mangle_name('', typedef.name) is None:
header = None
- if name == 'tp_setattro':
+ handled = False
+ # unary functions
+ for tp_name, attr in [('tp_as_number.c_nb_int', '__int__'),
+ ('tp_as_number.c_nb_float', '__float__'),
+ ('tp_str', '__str__'),
+ ('tp_iter', '__iter__'),
+ ('tp_iternext', 'next'),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if int_fn is None:
+ return
+
+ @cpython_api([PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self):
+ return space.call_function(slot_fn, w_self)
+ api_func = slot_func.api_func
+ handled = True
+
+ if handled:
+ pass
+ elif name == 'tp_setattro':
setattr_fn = w_type.getdictvalue(space, '__setattr__')
delattr_fn = w_type.getdictvalue(space, '__delattr__')
if setattr_fn is None:
@@ -401,28 +423,6 @@
return space.call_function(getattr_fn, w_self, w_name)
api_func = slot_tp_getattro.api_func
- elif name == 'tp_as_number.c_nb_int':
- int_fn = w_type.getdictvalue(space, '__int__')
- if int_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_nb_int(space, w_self):
- return space.call_function(int_fn, w_self)
- api_func = slot_nb_int.api_func
-
- elif name == 'tp_as_number.c_nb_float':
- float_fn = w_type.getdictvalue(space, '__float__')
- if float_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_nb_float(space, w_self):
- return space.call_function(float_fn, w_self)
- api_func = slot_nb_float.api_func
-
elif name == 'tp_call':
call_fn = w_type.getdictvalue(space, '__call__')
if call_fn is None:
@@ -436,44 +436,6 @@
return space.call_args(call_fn, args)
api_func = slot_tp_call.api_func
- elif name == 'tp_str':
- str_fn = w_type.getdictvalue(space, '__str__')
- if str_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_tp_str(space, w_self):
- return space.call_function(str_fn, w_self)
- api_func = slot_tp_str.api_func
-
- elif name == 'tp_iter':
- iter_fn = w_type.getdictvalue(space, '__iter__')
- if iter_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_tp_iter(space, w_self):
- return space.call_function(iter_fn, w_self)
- api_func = slot_tp_iter.api_func
-
- elif name == 'tp_iternext':
- iternext_fn = w_type.getdictvalue(space, 'next')
- if iternext_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_tp_iternext(space, w_self):
- try:
- return space.call_function(iternext_fn, w_self)
- except OperationError as e:
- if not e.match(space, space.w_StopIteration):
- raise
- return None
- api_func = slot_tp_iternext.api_func
-
elif name == 'tp_init':
init_fn = w_type.getdictvalue(space, '__init__')
if init_fn is None:
@@ -501,6 +463,7 @@
return space.call_args(space.get(new_fn, w_self), args)
api_func = slot_tp_new.api_func
else:
+ print 'unhandled slot',name,'for',w_type
return
return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))
From pypy.commits at gmail.com Sun May 8 15:32:34 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 12:32:34 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-more-slots: typo
Message-ID: <572f9452.d72d1c0a.4dc63.ffffc090@mx.google.com>
Author: Matti Picus
Branch: cpyext-more-slots
Changeset: r84310:03f7118df38b
Date: 2016-05-07 22:49 +0300
http://bitbucket.org/pypy/pypy/changeset/03f7118df38b/
Log: typo
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -384,7 +384,7 @@
]:
if name == tp_name:
slot_fn = w_type.getdictvalue(space, attr)
- if int_fn is None:
+ if slot_fn is None:
return
@cpython_api([PyObject], PyObject, header=header)
From pypy.commits at gmail.com Sun May 8 15:32:36 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 12:32:36 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-more-slots: add more unary, binary
functions; seems to fix np.float32(5.0) * 1.0 problem
Message-ID: <572f9454.45bd1c0a.97aec.ffffbeba@mx.google.com>
Author: Matti Picus
Branch: cpyext-more-slots
Changeset: r84311:c08bfcbb6937
Date: 2016-05-08 18:45 +0300
http://bitbucket.org/pypy/pypy/changeset/c08bfcbb6937/
Log: add more unary, binary functions; seems to fix np.float32(5.0) * 1.0
problem
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -378,7 +378,12 @@
# unary functions
for tp_name, attr in [('tp_as_number.c_nb_int', '__int__'),
('tp_as_number.c_nb_float', '__float__'),
+ ('tp_as_number.c_nb_negative', '__neg__'),
+ ('tp_as_number.c_nb_positive', '__pos__'),
+ ('tp_as_number.c_nb_absolute', '__abs__'),
+ ('tp_as_number.c_nb_invert', '__invert__'),
('tp_str', '__str__'),
+ ('tp_repr', '__repr__'),
('tp_iter', '__iter__'),
('tp_iternext', 'next'),
]:
@@ -394,6 +399,41 @@
api_func = slot_func.api_func
handled = True
+ # binary functions
+ for tp_name, attr in [('tp_as_number.c_nb_add', '__add__'),
+ ('tp_as_number.c_nb_subtract', '__subtract__'),
+ ('tp_as_number.c_nb_multiply', '__mul__'),
+ ('tp_as_number.c_nb_divide', '__div__'),
+ ('tp_as_number.c_nb_remainder', '__mod__'),
+ ('tp_as_number.c_nb_divmod', '__divmod__'),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject, PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self, w_arg):
+ return space.call_function(slot_fn, w_self, w_arg)
+ api_func = slot_func.api_func
+ handled = True
+
+ # ternary functions
+ for tp_name, attr in [('tp_as_number.c_nb_power', ''),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject, PyObject, PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self, w_arg1, w_arg2):
+ return space.call_function(slot_fn, w_self, w_arg1, w_arg2)
+ api_func = slot_func.api_func
+ handled = True
+
if handled:
pass
elif name == 'tp_setattro':
@@ -463,7 +503,6 @@
return space.call_args(space.get(new_fn, w_self), args)
api_func = slot_tp_new.api_func
else:
- print 'unhandled slot',name,'for',w_type
return
return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))
From pypy.commits at gmail.com Sun May 8 15:32:38 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 12:32:38 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-more-slots: add more slot functions
Message-ID: <572f9456.c8eac20a.f98cc.ffffdfdc@mx.google.com>
Author: Matti Picus
Branch: cpyext-more-slots
Changeset: r84312:d41bc96f8882
Date: 2016-05-08 21:58 +0300
http://bitbucket.org/pypy/pypy/changeset/d41bc96f8882/
Log: add more slot functions
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -377,15 +377,16 @@
handled = False
# unary functions
for tp_name, attr in [('tp_as_number.c_nb_int', '__int__'),
+ ('tp_as_number.c_nb_long', '__long__'),
('tp_as_number.c_nb_float', '__float__'),
('tp_as_number.c_nb_negative', '__neg__'),
('tp_as_number.c_nb_positive', '__pos__'),
('tp_as_number.c_nb_absolute', '__abs__'),
('tp_as_number.c_nb_invert', '__invert__'),
+ ('tp_as_number.c_nb_index', '__index__'),
('tp_str', '__str__'),
('tp_repr', '__repr__'),
('tp_iter', '__iter__'),
- ('tp_iternext', 'next'),
]:
if name == tp_name:
slot_fn = w_type.getdictvalue(space, attr)
@@ -406,6 +407,11 @@
('tp_as_number.c_nb_divide', '__div__'),
('tp_as_number.c_nb_remainder', '__mod__'),
('tp_as_number.c_nb_divmod', '__divmod__'),
+ ('tp_as_number.c_nb_lshift', '__lshift__'),
+ ('tp_as_number.c_nb_rshift', '__rshift__'),
+ ('tp_as_number.c_nb_and', '__and__'),
+ ('tp_as_number.c_nb_xor', '__xor__'),
+ ('tp_as_number.c_nb_or', '__or__'),
]:
if name == tp_name:
slot_fn = w_type.getdictvalue(space, attr)
@@ -503,6 +509,7 @@
return space.call_args(space.get(new_fn, w_self), args)
api_func = slot_tp_new.api_func
else:
+ # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce
return
return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))
From pypy.commits at gmail.com Sun May 8 15:32:40 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 12:32:40 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-more-slots: tp_iternext needs to be
 handled separately
Message-ID: <572f9458.c61ec20a.b18a4.ffffdfa5@mx.google.com>
Author: Matti Picus
Branch: cpyext-more-slots
Changeset: r84313:eaa65dc3d92b
Date: 2016-05-08 22:09 +0300
http://bitbucket.org/pypy/pypy/changeset/eaa65dc3d92b/
Log: tp_iternext needs to be handled separately
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -482,6 +482,22 @@
return space.call_args(call_fn, args)
api_func = slot_tp_call.api_func
+ elif name == 'tp_iternext':
+ iternext_fn = w_type.getdictvalue(space, 'next')
+ if iternext_fn is None:
+ return
+
+ @cpython_api([PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_tp_iternext(space, w_self):
+ try:
+ return space.call_function(iternext_fn, w_self)
+ except OperationError as e:
+ if not e.match(space, space.w_StopIteration):
+ raise
+ return None
+ api_func = slot_tp_iternext.api_func
+
elif name == 'tp_init':
init_fn = w_type.getdictvalue(space, '__init__')
if init_fn is None:
From pypy.commits at gmail.com Sun May 8 15:32:41 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 12:32:41 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-more-slots: merge default into branch
Message-ID: <572f9459.de361c0a.db8e8.ffffb812@mx.google.com>
Author: Matti Picus
Branch: cpyext-more-slots
Changeset: r84314:eef6d2175abb
Date: 2016-05-08 22:31 +0300
http://bitbucket.org/pypy/pypy/changeset/eef6d2175abb/
Log: merge default into branch
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -397,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
@@ -993,6 +998,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -79,3 +79,6 @@
It is a more flexible way to make RPython finalizers.
.. branch: unpacking-cpython-shortcut
+
+.. branch: cleanups
+
diff --git a/pypy/module/_multibytecodec/app_multibytecodec.py b/pypy/module/_multibytecodec/app_multibytecodec.py
--- a/pypy/module/_multibytecodec/app_multibytecodec.py
+++ b/pypy/module/_multibytecodec/app_multibytecodec.py
@@ -44,8 +44,10 @@
self, data))
def reset(self):
- self.stream.write(MultibyteIncrementalEncoder.encode(
- self, '', final=True))
+ data = MultibyteIncrementalEncoder.encode(
+ self, '', final=True)
+ if len(data) > 0:
+ self.stream.write(data)
MultibyteIncrementalEncoder.reset(self)
def writelines(self, lines):
diff --git a/pypy/module/_multibytecodec/test/test_app_stream.py b/pypy/module/_multibytecodec/test/test_app_stream.py
--- a/pypy/module/_multibytecodec/test/test_app_stream.py
+++ b/pypy/module/_multibytecodec/test/test_app_stream.py
@@ -90,3 +90,15 @@
w.write(u'\u304b')
w.write(u'\u309a')
assert w.stream.output == ['\x83m', '', '\x82\xf5']
+
+ def test_writer_seek_no_empty_write(self):
+ # issue #2293: codecs.py will sometimes issue a reset()
+ # on a StreamWriter attached to a file that is not opened
+ # for writing at all. We must not emit a "write('')"!
+ class FakeFile:
+ def write(self, data):
+ raise IOError("can't write!")
+ #
+ w = self.ShiftJisx0213StreamWriter(FakeFile())
+ w.reset()
+ # assert did not crash
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -258,7 +258,6 @@
elif space.is_w(w_idx, space.w_None):
return [NewAxisChunk(), EllipsisChunk()]
result = []
- i = 0
has_ellipsis = False
has_filter = False
for w_item in space.fixedview(w_idx):
@@ -274,7 +273,6 @@
result.append(NewAxisChunk())
elif space.isinstance_w(w_item, space.w_slice):
result.append(SliceChunk(w_item))
- i += 1
elif isinstance(w_item, W_NDimArray) and w_item.get_dtype().is_bool():
if has_filter:
# in CNumPy, the support for this is incomplete
@@ -287,7 +285,6 @@
result.append(IntegerChunk(w_item.descr_int(space)))
else:
result.append(IntegerChunk(w_item))
- i += 1
if not has_ellipsis:
result.append(EllipsisChunk())
return result
diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py
--- a/pypy/module/micronumpy/loop.py
+++ b/pypy/module/micronumpy/loop.py
@@ -199,7 +199,7 @@
reds='auto')
def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args):
- # out must hav been built. func needs no calc_type, is usually an
+ # out must have been built. func needs no calc_type, is usually an
# external ufunc
nin = len(in_args)
in_iters = [None] * nin
@@ -806,7 +806,6 @@
indexlen = len(indexes_w)
dtype = arr.get_dtype()
iter = PureShapeIter(iter_shape, indexes_w)
- indexlen = len(indexes_w)
while not iter.done():
getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen,
dtype=dtype, prefixlen=prefixlen)
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -231,11 +231,11 @@
dim = i
idx = c.w_idx
chunks.pop(i)
- chunks.insert(0, SliceChunk(space.newslice(space.wrap(0),
+ chunks.insert(0, SliceChunk(space.newslice(space.wrap(0),
space.w_None, space.w_None)))
break
if dim > 0:
- view = self.implementation.swapaxes(space, self, 0, dim)
+ view = self.implementation.swapaxes(space, self, 0, dim)
if dim >= 0:
view = new_view(space, self, chunks)
view.setitem_filter(space, idx, val_arr)
@@ -563,7 +563,7 @@
l_w = []
for i in range(self.get_shape()[0]):
item_w = self.descr_getitem(space, space.wrap(i))
- if (isinstance(item_w, W_NDimArray) or
+ if (isinstance(item_w, W_NDimArray) or
isinstance(item_w, boxes.W_GenericBox)):
l_w.append(space.call_method(item_w, "tolist"))
else:
@@ -740,7 +740,7 @@
space.str_w(self.get_dtype().descr_repr(space)),
space.str_w(new_dtype.descr_repr(space)), casting)
order = order_converter(space, space.wrap(order), self.get_order())
- if (not copy and new_dtype == self.get_dtype()
+ if (not copy and new_dtype == self.get_dtype()
and (order in (NPY.KEEPORDER, NPY.ANYORDER) or order == self.get_order())
and (subok or type(self) is W_NDimArray)):
return self
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -1,14 +1,13 @@
from pypy.interpreter.error import oefmt
from rpython.rlib import jit
-from pypy.module.micronumpy import support, constants as NPY
+from pypy.module.micronumpy import constants as NPY
from pypy.module.micronumpy.base import W_NDimArray
# structures to describe slicing
class BaseChunk(object):
- _attrs_ = ['step','out_dim']
- pass
+ _attrs_ = ['step', 'out_dim']
class Chunk(BaseChunk):
@@ -36,6 +35,7 @@
class IntegerChunk(BaseChunk):
input_dim = 1
out_dim = 0
+
def __init__(self, w_idx):
self.w_idx = w_idx
@@ -70,6 +70,7 @@
class EllipsisChunk(BaseChunk):
input_dim = 0
out_dim = 0
+
def __init__(self):
pass
@@ -80,6 +81,7 @@
class BooleanChunk(BaseChunk):
input_dim = 1
out_dim = 1
+
def __init__(self, w_idx):
self.w_idx = w_idx
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
@@ -56,7 +56,7 @@
max = int(max)
p = ffi.cast(c_decl, min)
assert p != min # no __eq__(int)
- assert bool(p) is True
+ assert bool(p) is bool(min)
assert int(p) == min
p = ffi.cast(c_decl, max)
assert int(p) == max
@@ -285,7 +285,9 @@
assert ffi.new("char*", b"\xff")[0] == b'\xff'
assert ffi.new("char*")[0] == b'\x00'
assert int(ffi.cast("char", 300)) == 300 - 256
- assert bool(ffi.cast("char", 0))
+ assert not bool(ffi.cast("char", 0))
+ assert bool(ffi.cast("char", 1))
+ assert bool(ffi.cast("char", 255))
py.test.raises(TypeError, ffi.new, "char*", 32)
py.test.raises(TypeError, ffi.new, "char*", u+"x")
py.test.raises(TypeError, ffi.new, "char*", b"foo")
@@ -326,7 +328,11 @@
py.test.raises(TypeError, ffi.new, "wchar_t*", u+'\U00012345')
assert ffi.new("wchar_t*")[0] == u+'\x00'
assert int(ffi.cast("wchar_t", 300)) == 300
- assert bool(ffi.cast("wchar_t", 0))
+ assert not bool(ffi.cast("wchar_t", 0))
+ assert bool(ffi.cast("wchar_t", 1))
+ assert bool(ffi.cast("wchar_t", 65535))
+ if SIZE_OF_WCHAR > 2:
+ assert bool(ffi.cast("wchar_t", 65536))
py.test.raises(TypeError, ffi.new, "wchar_t*", 32)
py.test.raises(TypeError, ffi.new, "wchar_t*", "foo")
#
@@ -1523,21 +1529,30 @@
import gc; gc.collect(); gc.collect(); gc.collect()
assert seen == [3]
+ def test_gc_disable(self):
+ ffi = FFI(backend=self.Backend())
+ p = ffi.new("int *", 123)
+ py.test.raises(TypeError, ffi.gc, p, None)
+ seen = []
+ q1 = ffi.gc(p, lambda p: seen.append(1))
+ q2 = ffi.gc(q1, lambda p: seen.append(2))
+ import gc; gc.collect()
+ assert seen == []
+ assert ffi.gc(q1, None) is None
+ del q1, q2
+ import gc; gc.collect(); gc.collect(); gc.collect()
+ assert seen == [2]
+
def test_gc_finite_list(self):
ffi = FFI(backend=self.Backend())
- public = not hasattr(ffi._backend, 'gcp')
p = ffi.new("int *", 123)
keepalive = []
for i in range(10):
keepalive.append(ffi.gc(p, lambda p: None))
- if public:
- assert len(ffi.gc_weakrefs.data) == i + 1
del keepalive[:]
import gc; gc.collect(); gc.collect()
for i in range(10):
keepalive.append(ffi.gc(p, lambda p: None))
- if public:
- assert len(ffi.gc_weakrefs.data) == 10
def test_CData_CType(self):
ffi = FFI(backend=self.Backend())
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
@@ -467,12 +467,12 @@
def test_introspect_order(self):
ffi = FFI()
- ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;")
- ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;")
- ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;")
- assert ffi.list_types() == (['b', 'bb', 'bbb'],
- ['a', 'cc', 'ccc'],
- ['aa', 'aaa', 'g'])
+ ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;")
+ ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;")
+ ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;")
+ assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'],
+ ['CFFIa', 'CFFIcc', 'CFFIccc'],
+ ['CFFIaa', 'CFFIaaa', 'CFFIg'])
def test_unpack(self):
ffi = FFI()
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
@@ -139,7 +139,7 @@
max = int(max)
p = ffi.cast(c_decl, min)
assert p != min # no __eq__(int)
- assert bool(p) is True
+ assert bool(p) is bool(min)
assert int(p) == min
p = ffi.cast(c_decl, max)
assert int(p) == max
@@ -351,7 +351,9 @@
assert ffi.new("char*", b"\xff")[0] == b'\xff'
assert ffi.new("char*")[0] == b'\x00'
assert int(ffi.cast("char", 300)) == 300 - 256
- assert bool(ffi.cast("char", 0))
+ assert not bool(ffi.cast("char", 0))
+ assert bool(ffi.cast("char", 1))
+ assert bool(ffi.cast("char", 255))
py.test.raises(TypeError, ffi.new, "char*", 32)
py.test.raises(TypeError, ffi.new, "char*", u+"x")
py.test.raises(TypeError, ffi.new, "char*", b"foo")
@@ -391,7 +393,11 @@
py.test.raises(TypeError, ffi.new, "wchar_t*", u+'\U00012345')
assert ffi.new("wchar_t*")[0] == u+'\x00'
assert int(ffi.cast("wchar_t", 300)) == 300
- assert bool(ffi.cast("wchar_t", 0))
+ assert not bool(ffi.cast("wchar_t", 0))
+ assert bool(ffi.cast("wchar_t", 1))
+ assert bool(ffi.cast("wchar_t", 65535))
+ if SIZE_OF_WCHAR > 2:
+ assert bool(ffi.cast("wchar_t", 65536))
py.test.raises(TypeError, ffi.new, "wchar_t*", 32)
py.test.raises(TypeError, ffi.new, "wchar_t*", "foo")
#
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -1898,14 +1898,14 @@
def test_introspect_order():
ffi = FFI()
- ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;")
- ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;")
- ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;")
+ ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;")
+ ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;")
+ ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;")
verify(ffi, "test_introspect_order", """
- union aaa { int a; }; typedef struct ccc { int a; } b;
- union g { int a; }; typedef struct cc { int a; } bbb;
- union aa { int a; }; typedef struct a { int a; } bb;
+ union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;
+ union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;
+ union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;
""")
- assert ffi.list_types() == (['b', 'bb', 'bbb'],
- ['a', 'cc', 'ccc'],
- ['aa', 'aaa', 'g'])
+ assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'],
+ ['CFFIa', 'CFFIcc', 'CFFIccc'],
+ ['CFFIaa', 'CFFIaaa', 'CFFIg'])
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
@@ -280,6 +280,14 @@
pass
with open("setup.py", "w") as f:
f.write("""if 1:
+ # https://bugs.python.org/issue23246
+ import sys
+ if sys.platform == 'win32':
+ try:
+ import setuptools
+ except ImportError:
+ pass
+
import cffi
ffi = cffi.FFI()
ffi.set_source("pack1.mymod", "/*code would be here*/")
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
@@ -80,8 +80,21 @@
# find a solution to that: we could hack sys.path inside the
# script run here, but we can't hack it in the same way in
# execute().
- output = self._run([sys.executable,
- os.path.join(local_dir, filename)])
+ pathname = os.path.join(path, filename)
+ with open(pathname, 'w') as g:
+ g.write('''
+# https://bugs.python.org/issue23246
+import sys
+if sys.platform == 'win32':
+ try:
+ import setuptools
+ except ImportError:
+ pass
+''')
+ with open(os.path.join(local_dir, filename), 'r') as f:
+ g.write(f.read())
+
+ output = self._run([sys.executable, pathname])
match = re.compile(r"\bFILENAME: (.+)").search(output)
assert match
dynamic_lib_name = match.group(1)
diff --git a/pypy/module/test_lib_pypy/cffi_tests/udir.py b/pypy/module/test_lib_pypy/cffi_tests/udir.py
--- a/pypy/module/test_lib_pypy/cffi_tests/udir.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/udir.py
@@ -1,4 +1,14 @@
# Generated by pypy/tool/import_cffi.py
import py
+import sys
udir = py.path.local.make_numbered_dir(prefix = 'ffi-')
+
+
+# Windows-only workaround for some configurations: see
+# https://bugs.python.org/issue23246 (Python 2.7.9)
+if sys.platform == 'win32':
+ try:
+ import setuptools
+ except ImportError:
+ pass
diff --git a/rpython/jit/metainterp/optimizeopt/intutils.py b/rpython/jit/metainterp/optimizeopt/intutils.py
--- a/rpython/jit/metainterp/optimizeopt/intutils.py
+++ b/rpython/jit/metainterp/optimizeopt/intutils.py
@@ -1,5 +1,8 @@
+import sys
from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, maxint, is_valid_int
from rpython.rlib.objectmodel import we_are_translated
+from rpython.rtyper.lltypesystem import lltype
+from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.jit.metainterp.resoperation import rop, ResOperation
from rpython.jit.metainterp.optimizeopt.info import AbstractInfo, INFO_NONNULL,\
INFO_UNKNOWN, INFO_NULL
@@ -174,15 +177,13 @@
def div_bound(self, other):
if self.has_upper and self.has_lower and \
other.has_upper and other.has_lower and \
- not other.contains(0):
- try:
- vals = (ovfcheck(self.upper / other.upper),
- ovfcheck(self.upper / other.lower),
- ovfcheck(self.lower / other.upper),
- ovfcheck(self.lower / other.lower))
- return IntBound(min4(vals), max4(vals))
- except OverflowError:
- return IntUnbounded()
+ not other.contains(0) and self.lower > (-sys.maxint-1):
+ vals = (
+ llop.int_floordiv(lltype.Signed, self.upper, other.upper),
+ llop.int_floordiv(lltype.Signed, self.upper, other.lower),
+ llop.int_floordiv(lltype.Signed, self.lower, other.upper),
+ llop.int_floordiv(lltype.Signed, self.lower, other.lower))
+ return IntBound(min4(vals), max4(vals))
else:
return IntUnbounded()
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
@@ -240,6 +240,8 @@
def test_div_bound():
+ from rpython.rtyper.lltypesystem import lltype
+ from rpython.rtyper.lltypesystem.lloperation import llop
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.div_bound(b2)
@@ -247,7 +249,8 @@
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
if n2 != 0:
- assert b3.contains(n1 / n2)
+ assert b3.contains(
+ llop.int_floordiv(lltype.Signed, n1, n2))
a=bound(2, 4).div_bound(bound(1, 2))
assert not a.contains(0)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -5529,6 +5529,27 @@
"""
self.optimize_loop(ops, expected)
+ def test_division_bound_bug(self):
+ ops = """
+ [i4]
+ i1 = int_ge(i4, -50)
+ guard_true(i1) []
+ i2 = int_le(i4, -40)
+ guard_true(i2) []
+ # here, -50 <= i4 <= -40
+
+ i5 = int_floordiv(i4, 30)
+ # here, we know that that i5 == -1 (C-style handling of negatives!)
+ escape_n(i5)
+ jump(i4)
+ """
+ expected = """
+ [i4, i5]
+ escape_n(-1)
+ jump(i4, -1)
+ """
+ self.optimize_loop(ops, expected)
+
def test_subsub_ovf(self):
ops = """
[i0]
From pypy.commits at gmail.com Sun May 8 23:26:51 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 20:26:51 -0700 (PDT)
Subject: [pypy-commit] pypy default: fix tests for 32 bit and running as
root in chroot
Message-ID: <5730037b.4ca51c0a.f2226.471d@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84315:392dd419f5d0
Date: 2016-05-09 03:19 +0000
http://bitbucket.org/pypy/pypy/changeset/392dd419f5d0/
Log: fix tests for 32 bit and running as root in chroot
tests still fail since rffi.INTPTR_T is ending up as a 'Signed' in
the ptr-to-function signature
diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py
--- a/pypy/module/cpyext/test/test_api.py
+++ b/pypy/module/cpyext/test/test_api.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.baseobjspace import W_Root
from pypy.module.cpyext.state import State
@@ -100,7 +100,8 @@
PyPy_TypedefTest2(space, ppos)
lltype.free(ppos, flavor='raw')
-
+ at pytest.mark.skipif(os.environ.get('USER')=='root',
+ reason='root can write to all files')
def test_copy_header_files(tmpdir):
api.copy_header_files(tmpdir, True)
def check(name):
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -40,7 +40,7 @@
#endif
if(s->ob_type->tp_basicsize != expected_size)
{
- printf("tp_basicsize==%ld\\n", s->ob_type->tp_basicsize);
+ printf("tp_basicsize==%zd\\n", s->ob_type->tp_basicsize);
result = 0;
}
Py_DECREF(s);
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -1521,7 +1521,7 @@
# Instantiated in cpyext/ndarrayobject. It is here since ufunc calls
# set_dims_and_steps, otherwise ufunc, ndarrayobject would have circular
# imports
-npy_intpp = rffi.LONGP
+npy_intpp = rffi.INTPTR_T
LONG_SIZE = LONG_BIT / 8
CCHARP_SIZE = _get_bitsize('P') / 8
From pypy.commits at gmail.com Sun May 8 23:30:47 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 20:30:47 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-more-slots: close branch to be merged
Message-ID: <57300467.a9a1c20a.a747e.560b@mx.google.com>
Author: Matti Picus
Branch: cpyext-more-slots
Changeset: r84316:1c44cdfc2868
Date: 2016-05-09 06:28 +0300
http://bitbucket.org/pypy/pypy/changeset/1c44cdfc2868/
Log: close branch to be merged
From pypy.commits at gmail.com Sun May 8 23:30:49 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 08 May 2016 20:30:49 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge cpyext-more-slots which fills in
 more slots from w_type
Message-ID: <57300469.a553c20a.33b82.5850@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84317:e4e5becbcac3
Date: 2016-05-09 06:30 +0300
http://bitbucket.org/pypy/pypy/changeset/e4e5becbcac3/
Log: merge cpyext-more-slots which fills in more slots from w_type
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -82,3 +82,5 @@
.. branch: cleanups
+.. branch: cpyext-more-slots
+
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -374,7 +374,75 @@
header = pypy_decl
if mangle_name('', typedef.name) is None:
header = None
- if name == 'tp_setattro':
+ handled = False
+ # unary functions
+ for tp_name, attr in [('tp_as_number.c_nb_int', '__int__'),
+ ('tp_as_number.c_nb_long', '__long__'),
+ ('tp_as_number.c_nb_float', '__float__'),
+ ('tp_as_number.c_nb_negative', '__neg__'),
+ ('tp_as_number.c_nb_positive', '__pos__'),
+ ('tp_as_number.c_nb_absolute', '__abs__'),
+ ('tp_as_number.c_nb_invert', '__invert__'),
+ ('tp_as_number.c_nb_index', '__index__'),
+ ('tp_str', '__str__'),
+ ('tp_repr', '__repr__'),
+ ('tp_iter', '__iter__'),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self):
+ return space.call_function(slot_fn, w_self)
+ api_func = slot_func.api_func
+ handled = True
+
+ # binary functions
+ for tp_name, attr in [('tp_as_number.c_nb_add', '__add__'),
+ ('tp_as_number.c_nb_subtract', '__subtract__'),
+ ('tp_as_number.c_nb_multiply', '__mul__'),
+ ('tp_as_number.c_nb_divide', '__div__'),
+ ('tp_as_number.c_nb_remainder', '__mod__'),
+ ('tp_as_number.c_nb_divmod', '__divmod__'),
+ ('tp_as_number.c_nb_lshift', '__lshift__'),
+ ('tp_as_number.c_nb_rshift', '__rshift__'),
+ ('tp_as_number.c_nb_and', '__and__'),
+ ('tp_as_number.c_nb_xor', '__xor__'),
+ ('tp_as_number.c_nb_or', '__or__'),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject, PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self, w_arg):
+ return space.call_function(slot_fn, w_self, w_arg)
+ api_func = slot_func.api_func
+ handled = True
+
+ # ternary functions
+ for tp_name, attr in [('tp_as_number.c_nb_power', ''),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject, PyObject, PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self, w_arg1, w_arg2):
+ return space.call_function(slot_fn, w_self, w_arg1, w_arg2)
+ api_func = slot_func.api_func
+ handled = True
+
+ if handled:
+ pass
+ elif name == 'tp_setattro':
setattr_fn = w_type.getdictvalue(space, '__setattr__')
delattr_fn = w_type.getdictvalue(space, '__delattr__')
if setattr_fn is None:
@@ -401,28 +469,6 @@
return space.call_function(getattr_fn, w_self, w_name)
api_func = slot_tp_getattro.api_func
- elif name == 'tp_as_number.c_nb_int':
- int_fn = w_type.getdictvalue(space, '__int__')
- if int_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_nb_int(space, w_self):
- return space.call_function(int_fn, w_self)
- api_func = slot_nb_int.api_func
-
- elif name == 'tp_as_number.c_nb_float':
- float_fn = w_type.getdictvalue(space, '__float__')
- if float_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_nb_float(space, w_self):
- return space.call_function(float_fn, w_self)
- api_func = slot_nb_float.api_func
-
elif name == 'tp_call':
call_fn = w_type.getdictvalue(space, '__call__')
if call_fn is None:
@@ -436,28 +482,6 @@
return space.call_args(call_fn, args)
api_func = slot_tp_call.api_func
- elif name == 'tp_str':
- str_fn = w_type.getdictvalue(space, '__str__')
- if str_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_tp_str(space, w_self):
- return space.call_function(str_fn, w_self)
- api_func = slot_tp_str.api_func
-
- elif name == 'tp_iter':
- iter_fn = w_type.getdictvalue(space, '__iter__')
- if iter_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_tp_iter(space, w_self):
- return space.call_function(iter_fn, w_self)
- api_func = slot_tp_iter.api_func
-
elif name == 'tp_iternext':
iternext_fn = w_type.getdictvalue(space, 'next')
if iternext_fn is None:
@@ -501,6 +525,7 @@
return space.call_args(space.get(new_fn, w_self), args)
api_func = slot_tp_new.api_func
else:
+ # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce
return
return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))
From pypy.commits at gmail.com Mon May 9 03:01:02 2016
From: pypy.commits at gmail.com (william_ml_leslie)
Date: Mon, 09 May 2016 00:01:02 -0700 (PDT)
Subject: [pypy-commit] pypy default: make TranslatorDriver.from_targetspec a
classmethod
Message-ID: <573035ae.171d1c0a.9ac59.ffff8337@mx.google.com>
Author: William ML Leslie
Branch:
Changeset: r84318:6ffd3556369a
Date: 2016-05-09 16:59 +1000
http://bitbucket.org/pypy/pypy/changeset/6ffd3556369a/
Log: make TranslatorDriver.from_targetspec a classmethod
diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py
--- a/rpython/translator/driver.py
+++ b/rpython/translator/driver.py
@@ -552,16 +552,16 @@
self.log.info('usession directory: %s' % (udir,))
return result
- @staticmethod
- def from_targetspec(targetspec_dic, config=None, args=None,
+ @classmethod
+ def from_targetspec(cls, targetspec_dic, config=None, args=None,
empty_translator=None,
disable=[],
default_goal=None):
if args is None:
args = []
- driver = TranslationDriver(config=config, default_goal=default_goal,
- disable=disable)
+ driver = cls(config=config, default_goal=default_goal,
+ disable=disable)
target = targetspec_dic['target']
spec = target(driver, args)
From pypy.commits at gmail.com Mon May 9 03:20:55 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 00:20:55 -0700 (PDT)
Subject: [pypy-commit] pypy default: A test (passing on -A). Its failure
could be related to the next lxml
Message-ID: <57303a57.41c8c20a.182ff.ffff9fbd@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84319:ad7a580821ea
Date: 2016-05-09 09:21 +0200
http://bitbucket.org/pypy/pypy/changeset/ad7a580821ea/
Log: A test (passing on -A). Its failure could be related to the next
lxml crash.
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -921,3 +921,57 @@
' multiple bases have instance lay-out conflict')
else:
raise AssertionError("did not get TypeError!")
+
+ def test_call_tp_dealloc_when_created_from_python(self):
+ import gc
+ module = self.import_extension('foo', [
+ ("fetchFooType", "METH_VARARGS",
+ """
+ PyObject *o;
+ Foo_Type.tp_dealloc = &dealloc_foo;
+ Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
+ Foo_Type.tp_new = &new_foo;
+ Foo_Type.tp_free = &PyObject_Del;
+ if (PyType_Ready(&Foo_Type) < 0) return NULL;
+
+ o = PyObject_New(PyObject, &Foo_Type);
+ Py_DECREF(o); /* calls dealloc_foo immediately */
+
+ Py_INCREF(&Foo_Type);
+ return (PyObject *)&Foo_Type;
+ """),
+ ("getCounter", "METH_VARARGS",
+ """
+ return PyInt_FromLong(foo_dealloc_counter);
+ """)], prologue=
+ """
+ static int foo_dealloc_counter = -1;
+ static void dealloc_foo(PyObject *foo) {
+ foo_dealloc_counter++;
+ }
+ static PyObject *new_foo(PyTypeObject *t, PyObject *a, PyObject *k)
+ {
+ return t->tp_alloc(t, 0);
+ }
+ static PyTypeObject Foo_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "foo.foo",
+ };
+ """)
+ Foo = module.fetchFooType()
+ assert module.getCounter() == 0
+ Foo(); Foo()
+ for i in range(10):
+ if module.getCounter() >= 2:
+ break
+ gc.collect()
+ assert module.getCounter() == 2
+ #
+ class Bar(Foo):
+ pass
+ Bar(); Bar()
+ for i in range(10):
+ if module.getCounter() >= 4:
+ break
+ gc.collect()
+ assert module.getCounter() == 4
From pypy.commits at gmail.com Mon May 9 03:22:43 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 00:22:43 -0700 (PDT)
Subject: [pypy-commit] pypy default: Use an xfail, to be nice
Message-ID: <57303ac3.2472c20a.acfef.ffffa516@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84320:93c3da9adab4
Date: 2016-05-09 09:23 +0200
http://bitbucket.org/pypy/pypy/changeset/93c3da9adab4/
Log: Use an xfail, to be nice
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -922,6 +922,7 @@
else:
raise AssertionError("did not get TypeError!")
+ @pytest.mark.xfail
def test_call_tp_dealloc_when_created_from_python(self):
import gc
module = self.import_extension('foo', [
From pypy.commits at gmail.com Mon May 9 03:30:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 00:30:42 -0700 (PDT)
Subject: [pypy-commit] pypy default: Fix the test. Now it fails only for
subclasses, which is what I
Message-ID: <57303ca2.a553c20a.33b82.ffff9cd4@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84321:4e12001044f0
Date: 2016-05-09 09:30 +0200
http://bitbucket.org/pypy/pypy/changeset/4e12001044f0/
Log: Fix the test. Now it fails only for subclasses, which is what I
originally expected
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -924,7 +924,6 @@
@pytest.mark.xfail
def test_call_tp_dealloc_when_created_from_python(self):
- import gc
module = self.import_extension('foo', [
("fetchFooType", "METH_VARARGS",
"""
@@ -965,7 +964,9 @@
for i in range(10):
if module.getCounter() >= 2:
break
- gc.collect()
+ # NB. use self.debug_collect() instead of gc.collect(),
+ # otherwise rawrefcount's dealloc callback doesn't trigger
+ self.debug_collect()
assert module.getCounter() == 2
#
class Bar(Foo):
@@ -974,5 +975,5 @@
for i in range(10):
if module.getCounter() >= 4:
break
- gc.collect()
+ self.debug_collect()
assert module.getCounter() == 4
From pypy.commits at gmail.com Mon May 9 04:07:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 01:07:39 -0700 (PDT)
Subject: [pypy-commit] pypy default: Fix for 4e12001044f0
Message-ID: <5730454b.235ec20a.77dcd.ffff8b06@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84322:979d7c8fcf6b
Date: 2016-05-09 10:04 +0200
http://bitbucket.org/pypy/pypy/changeset/979d7c8fcf6b/
Log: Fix for 4e12001044f0
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -922,7 +922,6 @@
else:
raise AssertionError("did not get TypeError!")
- @pytest.mark.xfail
def test_call_tp_dealloc_when_created_from_python(self):
module = self.import_extension('foo', [
("fetchFooType", "METH_VARARGS",
@@ -942,15 +941,16 @@
"""),
("getCounter", "METH_VARARGS",
"""
- return PyInt_FromLong(foo_dealloc_counter);
+ return PyInt_FromLong(foo_counter);
""")], prologue=
"""
- static int foo_dealloc_counter = -1;
+ static int foo_counter = 1000;
static void dealloc_foo(PyObject *foo) {
- foo_dealloc_counter++;
+ foo_counter += 10;
}
static PyObject *new_foo(PyTypeObject *t, PyObject *a, PyObject *k)
{
+ foo_counter += 1000;
return t->tp_alloc(t, 0);
}
static PyTypeObject Foo_Type = {
@@ -959,21 +959,21 @@
};
""")
Foo = module.fetchFooType()
- assert module.getCounter() == 0
+ assert module.getCounter() == 1010
Foo(); Foo()
for i in range(10):
- if module.getCounter() >= 2:
+ if module.getCounter() >= 3030:
break
# NB. use self.debug_collect() instead of gc.collect(),
# otherwise rawrefcount's dealloc callback doesn't trigger
self.debug_collect()
- assert module.getCounter() == 2
+ assert module.getCounter() == 3030
#
class Bar(Foo):
pass
Bar(); Bar()
for i in range(10):
- if module.getCounter() >= 4:
+ if module.getCounter() >= 5050:
break
self.debug_collect()
- assert module.getCounter() == 4
+ assert module.getCounter() == 5050
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -556,7 +556,14 @@
typedescr = get_typedescr(w_type.layout.typedef)
# dealloc
- pto.c_tp_dealloc = typedescr.get_dealloc(space)
+ if space.gettypeobject(w_type.layout.typedef) is w_type:
+ # only for the exact type, like 'space.w_tuple' or 'space.w_list'
+ pto.c_tp_dealloc = typedescr.get_dealloc(space)
+ else:
+ # for all subtypes, use subtype_dealloc()
+ pto.c_tp_dealloc = llhelper(
+ subtype_dealloc.api_func.functype,
+ subtype_dealloc.api_func.get_wrapper(space))
# buffer protocol
if space.is_w(w_type, space.w_str):
setup_string_buffer_procs(space, pto)
From pypy.commits at gmail.com Mon May 9 04:09:22 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 09 May 2016 01:09:22 -0700 (PDT)
Subject: [pypy-commit] buildbot default: moved py3k 1h later (at 4 o'clock)
and running py3.5 nightlies at 3 o'clock
Message-ID: <573045b2.cbb81c0a.e1563.ffff8e1f@mx.google.com>
Author: Richard Plangger
Branch:
Changeset: r1003:60db764e3567
Date: 2016-05-09 10:03 +0200
http://bitbucket.org/pypy/buildbot/changeset/60db764e3567/
Log: moved py3k 1h later (at 4 o'clock) and running py3.5 nightlies at 3
o'clock
diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py
--- a/bot2/pypybuildbot/master.py
+++ b/bot2/pypybuildbot/master.py
@@ -304,10 +304,13 @@
NUMPY_WIN, # on allegro_win32, SalsaSalsa
]),
+ Nightly("nightly-3-01-py3.5", [LINUX64, JITLINUX64,],
+ branch="py3.5", hour=3, minute=0),
+
Nightly("nightly-3-00-py3k", [
LINUX64, # on bencher4, uses all cores
JITLINUX64, # on bencher4, uses 1 core
- ], branch="py3k", hour=3, minute=0),
+ ], branch="py3k", hour=4, minute=0),
# S390X vm (ibm-research)
Nightly("nightly-4-00", [LINUX_S390X], branch='default', hour=0, minute=0),
From pypy.commits at gmail.com Mon May 9 04:27:50 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 01:27:50 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: Documentation (thanks cfbolz)
Message-ID: <57304a06.d2711c0a.9e252.ffff9d9b@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84323:837ed78ee722
Date: 2016-05-09 10:28 +0200
http://bitbucket.org/pypy/pypy/changeset/837ed78ee722/
Log: Documentation (thanks cfbolz)
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -33,26 +33,25 @@
it from a finalizer. A finalizer runs earlier, and in topological
order; care must be taken that the object might still be reachable at
this point if we're clever enough. A destructor on the other hand runs
-last; nothing can be done with the object any more.
+last; nothing can be done with the object any more, and the GC frees it
+immediately.
Destructors
-----------
A destructor is an RPython ``__del__()`` method that is called directly
-by the GC when there is no more reference to an object. Intended for
-objects that just need to free a block of raw memory or close a file.
+by the GC when it is about to free the memory. Intended for objects
+that just need to free an extra block of raw memory.
There are restrictions on the kind of code you can put in ``__del__()``,
including all other functions called by it. These restrictions are
-checked. In particular you cannot access fields containing GC objects;
-and if you call an external C function, it must be a "safe" function
-(e.g. not releasing the GIL; use ``releasegil=False`` in
-``rffi.llexternal()``).
+checked. In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
-If there are several objects with destructors that die during the same
-GC cycle, they are called in a completely random order --- but that
-should not matter because destructors cannot do much anyway.
+Destructors are called precisely when the GC frees the memory of the
+object. As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
Register_finalizer
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -548,11 +548,15 @@
def gc_disabled(self, w_obj):
# If we're running in 'gc.disable()' mode, record w_obj in the
- # "call me later" list and return True. Use this function
- # from _finalize_() methods that would call app-level some
- # things that we consider shouldn't be called in gc.disable().
- # (The exact definition is of course a bit vague, but most
- # importantly this includes all user-level __del__().)
+ # "call me later" list and return True. In normal mode, return
+ # False. Use this function from some _finalize_() methods:
+ # if a _finalize_() method would call some user-defined
+ # app-level function, like a weakref callback, then first do
+ # 'if gc.disabled(self): return'. Another attempt at
+ # calling _finalize_() will be made after 'gc.enable()'.
+ # (The exact rule for when to use gc_disabled() or not is a bit
+ # vague, but most importantly this includes all user-level
+ # __del__().)
pdd = self.pending_with_disabled_del
if pdd is None:
return False
From pypy.commits at gmail.com Mon May 9 04:29:30 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 01:29:30 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: hg merge default
Message-ID: <57304a6a.4d571c0a.25fa4.ffffa230@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84324:7b8178ec0f5b
Date: 2016-05-09 10:29 +0200
http://bitbucket.org/pypy/pypy/changeset/7b8178ec0f5b/
Log: hg merge default
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
if __name__ == '__main__':
if len(sys.argv) != 2:
+ if len(sys.argv) == 1:
+ # start locally
+ import sshgraphserver
+ sshgraphserver.ssh_graph_server(['LOCAL'])
+ sys.exit(0)
print >> sys.stderr, __doc__
sys.exit(2)
if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
Usage:
sshgraphserver.py hostname [more args for ssh...]
+ sshgraphserver.py LOCAL
This logs in to 'hostname' by passing the arguments on the command-line
to ssh. No further configuration is required: it works for all programs
using the dotviewer library as long as they run on 'hostname' under the
same username as the one sshgraphserver logs as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
"""
import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
s1 = socket.socket()
s1.bind(('127.0.0.1', socket.INADDR_ANY))
localhost, localport = s1.getsockname()
- remoteport = random.randrange(10000, 20000)
- # ^^^ and just hope there is no conflict
- args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
- args = args + sshargs + ['python -u -c "exec input()"']
- print ' '.join(args[:-1])
+ if sshargs[0] != 'LOCAL':
+ remoteport = random.randrange(10000, 20000)
+ # ^^^ and just hope there is no conflict
+
+ args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+ remoteport, localport)]
+ args = args + sshargs + ['python -u -c "exec input()"']
+ else:
+ remoteport = localport
+ args = ['python', '-u', '-c', 'exec input()']
+
+ print ' '.join(args)
p = subprocess.Popen(args, bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -397,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
@@ -993,6 +998,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -68,3 +68,19 @@
CPython).
.. branch: oefmt
+
+.. branch: cpyext-werror
+
+Compile c snippets with -Werror in cpyext
+
+.. branch: gc-del-3
+
+Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-order.rst.
+It is a more flexible way to make RPython finalizers.
+
+.. branch: unpacking-cpython-shortcut
+
+.. branch: cleanups
+
+.. branch: cpyext-more-slots
+
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -688,3 +688,21 @@
def f(x): pass
e = raises(TypeError, "f(**{u'ü' : 19})")
assert "?" in str(e.value)
+
+ def test_starstarargs_dict_subclass(self):
+ def f(**kwargs):
+ return kwargs
+ class DictSubclass(dict):
+ def __iter__(self):
+ yield 'x'
+ # CPython, as an optimization, looks directly into dict internals when
+ # passing one via **kwargs.
+ x =DictSubclass()
+ assert f(**x) == {}
+ x['a'] = 1
+ assert f(**x) == {'a': 1}
+
+ def test_starstarargs_module_dict(self):
+ def f(**kwargs):
+ return kwargs
+ assert f(**globals()) == globals()
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -71,7 +71,7 @@
def nonzero(self):
with self as ptr:
- nonzero = bool(ptr)
+ nonzero = self.ctype.nonzero(ptr)
return self.space.wrap(nonzero)
def int(self, space):
@@ -365,8 +365,16 @@
return self.ctype.size
def with_gc(self, w_destructor):
+ space = self.space
+ if space.is_none(w_destructor):
+ if isinstance(self, W_CDataGCP):
+ self.w_destructor = None
+ return space.w_None
+ raise oefmt(space.w_TypeError,
+ "Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
with self as ptr:
- return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor)
+ return W_CDataGCP(space, ptr, self.ctype, self, w_destructor)
def unpack(self, length):
from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray
@@ -527,7 +535,7 @@
class W_CDataGCP(W_CData):
"""For ffi.gc()."""
_attrs_ = ['w_original_cdata', 'w_destructor']
- _immutable_fields_ = ['w_original_cdata', 'w_destructor']
+ _immutable_fields_ = ['w_original_cdata']
def __init__(self, space, cdata, ctype, w_original_cdata, w_destructor):
W_CData.__init__(self, space, cdata, ctype)
@@ -536,7 +544,10 @@
self.register_finalizer(space)
def _finalize_(self):
- self.space.call_function(self.w_destructor, self.w_original_cdata)
+ w_destructor = self.w_destructor
+ if w_destructor is not None:
+ self.w_destructor = None
+ self.space.call_function(w_destructor, self.w_original_cdata)
W_CData.typedef = TypeDef(
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -147,6 +147,9 @@
raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number",
self.name)
+ def nonzero(self, cdata):
+ return bool(cdata)
+
def insert_name(self, extra, extra_position):
name = '%s%s%s' % (self.name[:self.name_position],
extra,
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -93,6 +93,18 @@
return self.space.newlist_int(result)
return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length)
+ def nonzero(self, cdata):
+ if self.size <= rffi.sizeof(lltype.Signed):
+ value = misc.read_raw_long_data(cdata, self.size)
+ return value != 0
+ else:
+ return self._nonzero_longlong(cdata)
+
+ def _nonzero_longlong(self, cdata):
+ # in its own function: LONGLONG may make the whole function jit-opaque
+ value = misc.read_raw_signed_data(cdata, self.size)
+ return bool(value)
+
class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive):
_attrs_ = []
@@ -435,6 +447,9 @@
return self.space.newlist_float(result)
return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length)
+ def nonzero(self, cdata):
+ return misc.is_nonnull_float(cdata, self.size)
+
class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat):
_attrs_ = []
@@ -501,3 +516,7 @@
rffi.LONGDOUBLE, rffi.LONGDOUBLEP)
return True
return W_CTypePrimitive.pack_list_of_items(self, cdata, w_ob)
+
+ @jit.dont_look_inside
+ def nonzero(self, cdata):
+ return misc.is_nonnull_longdouble(cdata)
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -256,7 +256,7 @@
def is_nonnull_longdouble(cdata):
return _is_nonnull_longdouble(read_raw_longdouble_data(cdata))
def is_nonnull_float(cdata, size):
- return read_raw_float_data(cdata, size) != 0.0
+ return read_raw_float_data(cdata, size) != 0.0 # note: True if a NaN
def object_as_bool(space, w_ob):
# convert and cast a Python object to a boolean. Accept an integer
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -141,9 +141,13 @@
INF = 1E200 * 1E200
for name in ["float", "double"]:
p = new_primitive_type(name)
- assert bool(cast(p, 0))
+ assert bool(cast(p, 0)) is False # since 1.7
+ assert bool(cast(p, -0.0)) is False # since 1.7
+ assert bool(cast(p, 1e-42)) is True
+ assert bool(cast(p, -1e-42)) is True
assert bool(cast(p, INF))
assert bool(cast(p, -INF))
+ assert bool(cast(p, float("nan")))
assert int(cast(p, -150)) == -150
assert int(cast(p, 61.91)) == 61
assert long(cast(p, 61.91)) == 61
@@ -202,7 +206,8 @@
def test_character_type():
p = new_primitive_type("char")
- assert bool(cast(p, '\x00'))
+ assert bool(cast(p, 'A')) is True
+ assert bool(cast(p, '\x00')) is False # since 1.7
assert cast(p, '\x00') != cast(p, -17*256)
assert int(cast(p, 'A')) == 65
assert long(cast(p, 'A')) == 65
@@ -2558,7 +2563,8 @@
BBoolP = new_pointer_type(BBool)
assert int(cast(BBool, False)) == 0
assert int(cast(BBool, True)) == 1
- assert bool(cast(BBool, False)) is True # warning!
+ assert bool(cast(BBool, False)) is False # since 1.7
+ assert bool(cast(BBool, True)) is True
assert int(cast(BBool, 3)) == 1
assert int(cast(BBool, long(3))) == 1
assert int(cast(BBool, long(10)**4000)) == 1
diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py
--- a/pypy/module/_cffi_backend/test/test_ffi_obj.py
+++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py
@@ -331,6 +331,25 @@
gc.collect()
assert seen == [1]
+ def test_ffi_gc_disable(self):
+ import _cffi_backend as _cffi1_backend
+ ffi = _cffi1_backend.FFI()
+ p = ffi.new("int *", 123)
+ raises(TypeError, ffi.gc, p, None)
+ seen = []
+ q1 = ffi.gc(p, lambda p: seen.append(1))
+ q2 = ffi.gc(q1, lambda p: seen.append(2))
+ import gc; gc.collect()
+ assert seen == []
+ assert ffi.gc(q1, None) is None
+ del q1, q2
+ for i in range(5):
+ if seen:
+ break
+ import gc
+ gc.collect()
+ assert seen == [2]
+
def test_ffi_new_allocator_1(self):
import _cffi_backend as _cffi1_backend
ffi = _cffi1_backend.FFI()
diff --git a/pypy/module/_multibytecodec/app_multibytecodec.py b/pypy/module/_multibytecodec/app_multibytecodec.py
--- a/pypy/module/_multibytecodec/app_multibytecodec.py
+++ b/pypy/module/_multibytecodec/app_multibytecodec.py
@@ -44,8 +44,10 @@
self, data))
def reset(self):
- self.stream.write(MultibyteIncrementalEncoder.encode(
- self, '', final=True))
+ data = MultibyteIncrementalEncoder.encode(
+ self, '', final=True)
+ if len(data) > 0:
+ self.stream.write(data)
MultibyteIncrementalEncoder.reset(self)
def writelines(self, lines):
diff --git a/pypy/module/_multibytecodec/test/test_app_stream.py b/pypy/module/_multibytecodec/test/test_app_stream.py
--- a/pypy/module/_multibytecodec/test/test_app_stream.py
+++ b/pypy/module/_multibytecodec/test/test_app_stream.py
@@ -90,3 +90,15 @@
w.write(u'\u304b')
w.write(u'\u309a')
assert w.stream.output == ['\x83m', '', '\x82\xf5']
+
+ def test_writer_seek_no_empty_write(self):
+ # issue #2293: codecs.py will sometimes issue a reset()
+ # on a StreamWriter attached to a file that is not opened
+ # for writing at all. We must not emit a "write('')"!
+ class FakeFile:
+ def write(self, data):
+ raise IOError("can't write!")
+ #
+ w = self.ShiftJisx0213StreamWriter(FakeFile())
+ w.reset()
+ # assert did not crash
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -248,7 +248,7 @@
w_signature = rffi.charp2str(signature)
return do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc,
check_return, w_signature)
-
+
def do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc,
check_return, w_signature):
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -374,7 +374,75 @@
header = pypy_decl
if mangle_name('', typedef.name) is None:
header = None
- if name == 'tp_setattro':
+ handled = False
+ # unary functions
+ for tp_name, attr in [('tp_as_number.c_nb_int', '__int__'),
+ ('tp_as_number.c_nb_long', '__long__'),
+ ('tp_as_number.c_nb_float', '__float__'),
+ ('tp_as_number.c_nb_negative', '__neg__'),
+ ('tp_as_number.c_nb_positive', '__pos__'),
+ ('tp_as_number.c_nb_absolute', '__abs__'),
+ ('tp_as_number.c_nb_invert', '__invert__'),
+ ('tp_as_number.c_nb_index', '__index__'),
+ ('tp_str', '__str__'),
+ ('tp_repr', '__repr__'),
+ ('tp_iter', '__iter__'),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self):
+ return space.call_function(slot_fn, w_self)
+ api_func = slot_func.api_func
+ handled = True
+
+ # binary functions
+ for tp_name, attr in [('tp_as_number.c_nb_add', '__add__'),
+ ('tp_as_number.c_nb_subtract', '__subtract__'),
+ ('tp_as_number.c_nb_multiply', '__mul__'),
+ ('tp_as_number.c_nb_divide', '__div__'),
+ ('tp_as_number.c_nb_remainder', '__mod__'),
+ ('tp_as_number.c_nb_divmod', '__divmod__'),
+ ('tp_as_number.c_nb_lshift', '__lshift__'),
+ ('tp_as_number.c_nb_rshift', '__rshift__'),
+ ('tp_as_number.c_nb_and', '__and__'),
+ ('tp_as_number.c_nb_xor', '__xor__'),
+ ('tp_as_number.c_nb_or', '__or__'),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject, PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self, w_arg):
+ return space.call_function(slot_fn, w_self, w_arg)
+ api_func = slot_func.api_func
+ handled = True
+
+ # ternary functions
+ for tp_name, attr in [('tp_as_number.c_nb_power', ''),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject, PyObject, PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self, w_arg1, w_arg2):
+ return space.call_function(slot_fn, w_self, w_arg1, w_arg2)
+ api_func = slot_func.api_func
+ handled = True
+
+ if handled:
+ pass
+ elif name == 'tp_setattro':
setattr_fn = w_type.getdictvalue(space, '__setattr__')
delattr_fn = w_type.getdictvalue(space, '__delattr__')
if setattr_fn is None:
@@ -401,28 +469,6 @@
return space.call_function(getattr_fn, w_self, w_name)
api_func = slot_tp_getattro.api_func
- elif name == 'tp_as_number.c_nb_int':
- int_fn = w_type.getdictvalue(space, '__int__')
- if int_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_nb_int(space, w_self):
- return space.call_function(int_fn, w_self)
- api_func = slot_nb_int.api_func
-
- elif name == 'tp_as_number.c_nb_float':
- float_fn = w_type.getdictvalue(space, '__float__')
- if float_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_nb_float(space, w_self):
- return space.call_function(float_fn, w_self)
- api_func = slot_nb_float.api_func
-
elif name == 'tp_call':
call_fn = w_type.getdictvalue(space, '__call__')
if call_fn is None:
@@ -436,28 +482,6 @@
return space.call_args(call_fn, args)
api_func = slot_tp_call.api_func
- elif name == 'tp_str':
- str_fn = w_type.getdictvalue(space, '__str__')
- if str_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_tp_str(space, w_self):
- return space.call_function(str_fn, w_self)
- api_func = slot_tp_str.api_func
-
- elif name == 'tp_iter':
- iter_fn = w_type.getdictvalue(space, '__iter__')
- if iter_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_tp_iter(space, w_self):
- return space.call_function(iter_fn, w_self)
- api_func = slot_tp_iter.api_func
-
elif name == 'tp_iternext':
iternext_fn = w_type.getdictvalue(space, 'next')
if iternext_fn is None:
@@ -501,6 +525,7 @@
return space.call_args(space.get(new_fn, w_self), args)
api_func = slot_tp_new.api_func
else:
+ # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce
return
return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))
diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py
--- a/pypy/module/cpyext/test/test_api.py
+++ b/pypy/module/cpyext/test/test_api.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.baseobjspace import W_Root
from pypy.module.cpyext.state import State
@@ -100,7 +100,8 @@
PyPy_TypedefTest2(space, ppos)
lltype.free(ppos, flavor='raw')
-
+ at pytest.mark.skipif(os.environ.get('USER')=='root',
+ reason='root can write to all files')
def test_copy_header_files(tmpdir):
api.copy_header_files(tmpdir, True)
def check(name):
diff --git a/pypy/module/cpyext/test/test_borrow.py b/pypy/module/cpyext/test/test_borrow.py
--- a/pypy/module/cpyext/test/test_borrow.py
+++ b/pypy/module/cpyext/test/test_borrow.py
@@ -12,13 +12,13 @@
PyObject *t = PyTuple_New(1);
PyObject *f = PyFloat_FromDouble(42.0);
PyObject *g = NULL;
- printf("Refcnt1: %i\\n", f->ob_refcnt);
+ printf("Refcnt1: %zd\\n", f->ob_refcnt);
PyTuple_SetItem(t, 0, f); // steals reference
- printf("Refcnt2: %i\\n", f->ob_refcnt);
+ printf("Refcnt2: %zd\\n", f->ob_refcnt);
f = PyTuple_GetItem(t, 0); // borrows reference
- printf("Refcnt3: %i\\n", f->ob_refcnt);
+ printf("Refcnt3: %zd\\n", f->ob_refcnt);
g = PyTuple_GetItem(t, 0); // borrows reference again
- printf("Refcnt4: %i\\n", f->ob_refcnt);
+ printf("Refcnt4: %zd\\n", f->ob_refcnt);
printf("COMPARE: %i\\n", f == g);
fflush(stdout);
Py_DECREF(t);
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -40,7 +40,7 @@
#endif
if(s->ob_type->tp_basicsize != expected_size)
{
- printf("tp_basicsize==%ld\\n", s->ob_type->tp_basicsize);
+ printf("tp_basicsize==%zd\\n", s->ob_type->tp_basicsize);
result = 0;
}
Py_DECREF(s);
@@ -162,7 +162,10 @@
module = self.import_extension('foo', [
("string_None", "METH_VARARGS",
'''
- return PyString_AsString(Py_None);
+ if (PyString_AsString(Py_None)) {
+ Py_RETURN_NONE;
+ }
+ return NULL;
'''
)])
raises(TypeError, module.string_None)
diff --git a/pypy/module/cpyext/test/test_classobject.py b/pypy/module/cpyext/test/test_classobject.py
--- a/pypy/module/cpyext/test/test_classobject.py
+++ b/pypy/module/cpyext/test/test_classobject.py
@@ -29,7 +29,6 @@
assert space.unwrap(space.getattr(w_instance, space.wrap('x'))) == 1
assert space.unwrap(space.getattr(w_instance, space.wrap('y'))) == 2
assert space.unwrap(space.getattr(w_instance, space.wrap('args'))) == (3,)
-
def test_lookup(self, space, api):
w_instance = space.appexec([], """():
@@ -68,7 +67,7 @@
("get_classtype", "METH_NOARGS",
"""
Py_INCREF(&PyClass_Type);
- return &PyClass_Type;
+ return (PyObject*)&PyClass_Type;
""")])
class C: pass
assert module.get_classtype() is type(C)
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -72,8 +72,7 @@
else:
kwds["link_files"] = [str(api_library + '.so')]
if sys.platform.startswith('linux'):
- kwds["compile_extra"]=["-Werror=implicit-function-declaration",
- "-g", "-O0"]
+ kwds["compile_extra"]=["-Werror", "-g", "-O0"]
kwds["link_extra"]=["-g"]
modname = modname.split('.')[-1]
@@ -747,7 +746,7 @@
refcnt_after = true_obj->ob_refcnt;
Py_DECREF(true_obj);
Py_DECREF(true_obj);
- fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after);
+ fprintf(stderr, "REFCNT %zd %zd\\n", refcnt, refcnt_after);
return PyBool_FromLong(refcnt_after == refcnt + 2);
}
static PyObject* foo_bar(PyObject* self, PyObject *args)
@@ -763,7 +762,7 @@
return NULL;
refcnt_after = true_obj->ob_refcnt;
Py_DECREF(tup);
- fprintf(stderr, "REFCNT2 %i %i %i\\n", refcnt, refcnt_after,
+ fprintf(stderr, "REFCNT2 %zd %zd %zd\\n", refcnt, refcnt_after,
true_obj->ob_refcnt);
return PyBool_FromLong(refcnt_after == refcnt + 1 &&
refcnt == true_obj->ob_refcnt);
diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py
--- a/pypy/module/cpyext/test/test_longobject.py
+++ b/pypy/module/cpyext/test/test_longobject.py
@@ -171,7 +171,7 @@
int little_endian, is_signed;
if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed))
return NULL;
- return _PyLong_FromByteArray("\x9A\xBC", 2,
+ return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC", 2,
little_endian, is_signed);
"""),
])
@@ -187,7 +187,7 @@
int little_endian, is_signed;
if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed))
return NULL;
- return _PyLong_FromByteArray("\x9A\xBC\x41", 3,
+ return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC\x41", 3,
little_endian, is_signed);
"""),
])
diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
--- a/pypy/module/cpyext/test/test_pyerrors.py
+++ b/pypy/module/cpyext/test/test_pyerrors.py
@@ -168,14 +168,14 @@
PyErr_NormalizeException(&type, &val, &tb);
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
- if (val->ob_type != PyExc_TypeError)
+ if ((PyObject*)Py_TYPE(val) != PyExc_TypeError)
Py_RETURN_FALSE;
/* Normalize again */
PyErr_NormalizeException(&type, &val, &tb);
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
- if (val->ob_type != PyExc_TypeError)
+ if ((PyObject*)Py_TYPE(val) != PyExc_TypeError)
Py_RETURN_FALSE;
PyErr_Restore(type, val, tb);
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -437,14 +437,14 @@
("test_tp_getattro", "METH_VARARGS",
'''
PyObject *name, *obj = PyTuple_GET_ITEM(args, 0);
- PyIntObject *attr, *value = PyTuple_GET_ITEM(args, 1);
+ PyIntObject *attr, *value = (PyIntObject*) PyTuple_GET_ITEM(args, 1);
if (!obj->ob_type->tp_getattro)
{
PyErr_SetString(PyExc_ValueError, "missing tp_getattro");
return NULL;
}
name = PyString_FromString("attr1");
- attr = obj->ob_type->tp_getattro(obj, name);
+ attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name);
if (attr->ob_ival != value->ob_ival)
{
PyErr_SetString(PyExc_ValueError,
@@ -454,7 +454,7 @@
Py_DECREF(name);
Py_DECREF(attr);
name = PyString_FromString("attr2");
- attr = obj->ob_type->tp_getattro(obj, name);
+ attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name);
if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError))
{
PyErr_Clear();
@@ -758,8 +758,9 @@
} IntLikeObject;
static int
- intlike_nb_nonzero(IntLikeObject *v)
+ intlike_nb_nonzero(PyObject *o)
{
+ IntLikeObject *v = (IntLikeObject*)o;
if (v->value == -42) {
PyErr_SetNone(PyExc_ValueError);
return -1;
@@ -920,3 +921,59 @@
' multiple bases have instance lay-out conflict')
else:
raise AssertionError("did not get TypeError!")
+
+ def test_call_tp_dealloc_when_created_from_python(self):
+ module = self.import_extension('foo', [
+ ("fetchFooType", "METH_VARARGS",
+ """
+ PyObject *o;
+ Foo_Type.tp_dealloc = &dealloc_foo;
+ Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
+ Foo_Type.tp_new = &new_foo;
+ Foo_Type.tp_free = &PyObject_Del;
+ if (PyType_Ready(&Foo_Type) < 0) return NULL;
+
+ o = PyObject_New(PyObject, &Foo_Type);
+ Py_DECREF(o); /* calls dealloc_foo immediately */
+
+ Py_INCREF(&Foo_Type);
+ return (PyObject *)&Foo_Type;
+ """),
+ ("getCounter", "METH_VARARGS",
+ """
+ return PyInt_FromLong(foo_counter);
+ """)], prologue=
+ """
+ static int foo_counter = 1000;
+ static void dealloc_foo(PyObject *foo) {
+ foo_counter += 10;
+ }
+ static PyObject *new_foo(PyTypeObject *t, PyObject *a, PyObject *k)
+ {
+ foo_counter += 1000;
+ return t->tp_alloc(t, 0);
+ }
+ static PyTypeObject Foo_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "foo.foo",
+ };
+ """)
+ Foo = module.fetchFooType()
+ assert module.getCounter() == 1010
+ Foo(); Foo()
+ for i in range(10):
+ if module.getCounter() >= 3030:
+ break
+ # NB. use self.debug_collect() instead of gc.collect(),
+ # otherwise rawrefcount's dealloc callback doesn't trigger
+ self.debug_collect()
+ assert module.getCounter() == 3030
+ #
+ class Bar(Foo):
+ pass
+ Bar(); Bar()
+ for i in range(10):
+ if module.getCounter() >= 5050:
+ break
+ self.debug_collect()
+ assert module.getCounter() == 5050
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -556,7 +556,14 @@
typedescr = get_typedescr(w_type.layout.typedef)
# dealloc
- pto.c_tp_dealloc = typedescr.get_dealloc(space)
+ if space.gettypeobject(w_type.layout.typedef) is w_type:
+ # only for the exact type, like 'space.w_tuple' or 'space.w_list'
+ pto.c_tp_dealloc = typedescr.get_dealloc(space)
+ else:
+ # for all subtypes, use subtype_dealloc()
+ pto.c_tp_dealloc = llhelper(
+ subtype_dealloc.api_func.functype,
+ subtype_dealloc.api_func.get_wrapper(space))
# buffer protocol
if space.is_w(w_type, space.w_str):
setup_string_buffer_procs(space, pto)
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -258,7 +258,6 @@
elif space.is_w(w_idx, space.w_None):
return [NewAxisChunk(), EllipsisChunk()]
result = []
- i = 0
has_ellipsis = False
has_filter = False
for w_item in space.fixedview(w_idx):
@@ -274,7 +273,6 @@
result.append(NewAxisChunk())
elif space.isinstance_w(w_item, space.w_slice):
result.append(SliceChunk(w_item))
- i += 1
elif isinstance(w_item, W_NDimArray) and w_item.get_dtype().is_bool():
if has_filter:
# in CNumPy, the support for this is incomplete
@@ -287,7 +285,6 @@
result.append(IntegerChunk(w_item.descr_int(space)))
else:
result.append(IntegerChunk(w_item))
- i += 1
if not has_ellipsis:
result.append(EllipsisChunk())
return result
diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py
--- a/pypy/module/micronumpy/loop.py
+++ b/pypy/module/micronumpy/loop.py
@@ -199,7 +199,7 @@
reds='auto')
def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args):
- # out must hav been built. func needs no calc_type, is usually an
+ # out must have been built. func needs no calc_type, is usually an
# external ufunc
nin = len(in_args)
in_iters = [None] * nin
@@ -806,7 +806,6 @@
indexlen = len(indexes_w)
dtype = arr.get_dtype()
iter = PureShapeIter(iter_shape, indexes_w)
- indexlen = len(indexes_w)
while not iter.done():
getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen,
dtype=dtype, prefixlen=prefixlen)
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -231,11 +231,11 @@
dim = i
idx = c.w_idx
chunks.pop(i)
- chunks.insert(0, SliceChunk(space.newslice(space.wrap(0),
+ chunks.insert(0, SliceChunk(space.newslice(space.wrap(0),
space.w_None, space.w_None)))
break
if dim > 0:
- view = self.implementation.swapaxes(space, self, 0, dim)
+ view = self.implementation.swapaxes(space, self, 0, dim)
if dim >= 0:
view = new_view(space, self, chunks)
view.setitem_filter(space, idx, val_arr)
@@ -563,7 +563,7 @@
l_w = []
for i in range(self.get_shape()[0]):
item_w = self.descr_getitem(space, space.wrap(i))
- if (isinstance(item_w, W_NDimArray) or
+ if (isinstance(item_w, W_NDimArray) or
isinstance(item_w, boxes.W_GenericBox)):
l_w.append(space.call_method(item_w, "tolist"))
else:
@@ -740,7 +740,7 @@
space.str_w(self.get_dtype().descr_repr(space)),
space.str_w(new_dtype.descr_repr(space)), casting)
order = order_converter(space, space.wrap(order), self.get_order())
- if (not copy and new_dtype == self.get_dtype()
+ if (not copy and new_dtype == self.get_dtype()
and (order in (NPY.KEEPORDER, NPY.ANYORDER) or order == self.get_order())
and (subok or type(self) is W_NDimArray)):
return self
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -1,14 +1,13 @@
from pypy.interpreter.error import oefmt
from rpython.rlib import jit
-from pypy.module.micronumpy import support, constants as NPY
+from pypy.module.micronumpy import constants as NPY
from pypy.module.micronumpy.base import W_NDimArray
# structures to describe slicing
class BaseChunk(object):
- _attrs_ = ['step','out_dim']
- pass
+ _attrs_ = ['step', 'out_dim']
class Chunk(BaseChunk):
@@ -36,6 +35,7 @@
class IntegerChunk(BaseChunk):
input_dim = 1
out_dim = 0
+
def __init__(self, w_idx):
self.w_idx = w_idx
@@ -70,6 +70,7 @@
class EllipsisChunk(BaseChunk):
input_dim = 0
out_dim = 0
+
def __init__(self):
pass
@@ -80,6 +81,7 @@
class BooleanChunk(BaseChunk):
input_dim = 1
out_dim = 1
+
def __init__(self, w_idx):
self.w_idx = w_idx
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -1521,7 +1521,7 @@
# Instantiated in cpyext/ndarrayobject. It is here since ufunc calls
# set_dims_and_steps, otherwise ufunc, ndarrayobject would have circular
# imports
-npy_intpp = rffi.LONGP
+npy_intpp = rffi.INTPTR_T
LONG_SIZE = LONG_BIT / 8
CCHARP_SIZE = _get_bitsize('P') / 8
diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py
--- a/pypy/module/select/interp_epoll.py
+++ b/pypy/module/select/interp_epoll.py
@@ -53,6 +53,10 @@
EPOLL_CTL_MOD = cconfig["EPOLL_CTL_MOD"]
EPOLL_CTL_DEL = cconfig["EPOLL_CTL_DEL"]
+DEF_REGISTER_EVENTMASK = (public_symbols["EPOLLIN"] |
+ public_symbols["EPOLLOUT"] |
+ public_symbols["EPOLLPRI"])
+
epoll_create = rffi.llexternal(
"epoll_create", [rffi.INT], rffi.INT, compilation_info=eci,
save_err=rffi.RFFI_SAVE_ERRNO
@@ -133,7 +137,7 @@
self.close()
@unwrap_spec(eventmask=int)
- def descr_register(self, space, w_fd, eventmask=-1):
+ def descr_register(self, space, w_fd, eventmask=DEF_REGISTER_EVENTMASK):
self.check_closed(space)
self.epoll_ctl(space, EPOLL_CTL_ADD, w_fd, eventmask)
@@ -142,7 +146,7 @@
self.epoll_ctl(space, EPOLL_CTL_DEL, w_fd, 0, ignore_ebadf=True)
@unwrap_spec(eventmask=int)
- def descr_modify(self, space, w_fd, eventmask=-1):
+ def descr_modify(self, space, w_fd, eventmask):
self.check_closed(space)
self.epoll_ctl(space, EPOLL_CTL_MOD, w_fd, eventmask)
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
@@ -56,7 +56,7 @@
max = int(max)
p = ffi.cast(c_decl, min)
assert p != min # no __eq__(int)
- assert bool(p) is True
+ assert bool(p) is bool(min)
assert int(p) == min
p = ffi.cast(c_decl, max)
assert int(p) == max
@@ -285,7 +285,9 @@
assert ffi.new("char*", b"\xff")[0] == b'\xff'
assert ffi.new("char*")[0] == b'\x00'
assert int(ffi.cast("char", 300)) == 300 - 256
- assert bool(ffi.cast("char", 0))
+ assert not bool(ffi.cast("char", 0))
+ assert bool(ffi.cast("char", 1))
+ assert bool(ffi.cast("char", 255))
py.test.raises(TypeError, ffi.new, "char*", 32)
py.test.raises(TypeError, ffi.new, "char*", u+"x")
py.test.raises(TypeError, ffi.new, "char*", b"foo")
@@ -326,7 +328,11 @@
py.test.raises(TypeError, ffi.new, "wchar_t*", u+'\U00012345')
assert ffi.new("wchar_t*")[0] == u+'\x00'
assert int(ffi.cast("wchar_t", 300)) == 300
- assert bool(ffi.cast("wchar_t", 0))
+ assert not bool(ffi.cast("wchar_t", 0))
+ assert bool(ffi.cast("wchar_t", 1))
+ assert bool(ffi.cast("wchar_t", 65535))
+ if SIZE_OF_WCHAR > 2:
+ assert bool(ffi.cast("wchar_t", 65536))
py.test.raises(TypeError, ffi.new, "wchar_t*", 32)
py.test.raises(TypeError, ffi.new, "wchar_t*", "foo")
#
@@ -1523,21 +1529,30 @@
import gc; gc.collect(); gc.collect(); gc.collect()
assert seen == [3]
+ def test_gc_disable(self):
+ ffi = FFI(backend=self.Backend())
+ p = ffi.new("int *", 123)
+ py.test.raises(TypeError, ffi.gc, p, None)
+ seen = []
+ q1 = ffi.gc(p, lambda p: seen.append(1))
+ q2 = ffi.gc(q1, lambda p: seen.append(2))
+ import gc; gc.collect()
+ assert seen == []
+ assert ffi.gc(q1, None) is None
+ del q1, q2
+ import gc; gc.collect(); gc.collect(); gc.collect()
+ assert seen == [2]
+
def test_gc_finite_list(self):
ffi = FFI(backend=self.Backend())
- public = not hasattr(ffi._backend, 'gcp')
p = ffi.new("int *", 123)
keepalive = []
for i in range(10):
keepalive.append(ffi.gc(p, lambda p: None))
- if public:
- assert len(ffi.gc_weakrefs.data) == i + 1
del keepalive[:]
import gc; gc.collect(); gc.collect()
for i in range(10):
keepalive.append(ffi.gc(p, lambda p: None))
- if public:
- assert len(ffi.gc_weakrefs.data) == 10
def test_CData_CType(self):
ffi = FFI(backend=self.Backend())
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
@@ -467,12 +467,12 @@
def test_introspect_order(self):
ffi = FFI()
- ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;")
- ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;")
- ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;")
- assert ffi.list_types() == (['b', 'bb', 'bbb'],
- ['a', 'cc', 'ccc'],
- ['aa', 'aaa', 'g'])
+ ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;")
+ ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;")
+ ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;")
+ assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'],
+ ['CFFIa', 'CFFIcc', 'CFFIccc'],
+ ['CFFIaa', 'CFFIaaa', 'CFFIg'])
def test_unpack(self):
ffi = FFI()
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
@@ -139,7 +139,7 @@
max = int(max)
p = ffi.cast(c_decl, min)
assert p != min # no __eq__(int)
- assert bool(p) is True
+ assert bool(p) is bool(min)
assert int(p) == min
p = ffi.cast(c_decl, max)
assert int(p) == max
@@ -351,7 +351,9 @@
assert ffi.new("char*", b"\xff")[0] == b'\xff'
assert ffi.new("char*")[0] == b'\x00'
assert int(ffi.cast("char", 300)) == 300 - 256
- assert bool(ffi.cast("char", 0))
+ assert not bool(ffi.cast("char", 0))
+ assert bool(ffi.cast("char", 1))
+ assert bool(ffi.cast("char", 255))
py.test.raises(TypeError, ffi.new, "char*", 32)
py.test.raises(TypeError, ffi.new, "char*", u+"x")
py.test.raises(TypeError, ffi.new, "char*", b"foo")
@@ -391,7 +393,11 @@
py.test.raises(TypeError, ffi.new, "wchar_t*", u+'\U00012345')
assert ffi.new("wchar_t*")[0] == u+'\x00'
assert int(ffi.cast("wchar_t", 300)) == 300
- assert bool(ffi.cast("wchar_t", 0))
+ assert not bool(ffi.cast("wchar_t", 0))
+ assert bool(ffi.cast("wchar_t", 1))
+ assert bool(ffi.cast("wchar_t", 65535))
+ if SIZE_OF_WCHAR > 2:
+ assert bool(ffi.cast("wchar_t", 65536))
py.test.raises(TypeError, ffi.new, "wchar_t*", 32)
py.test.raises(TypeError, ffi.new, "wchar_t*", "foo")
#
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -1898,14 +1898,14 @@
def test_introspect_order():
ffi = FFI()
- ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;")
- ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;")
- ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;")
+ ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;")
+ ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;")
+ ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;")
verify(ffi, "test_introspect_order", """
- union aaa { int a; }; typedef struct ccc { int a; } b;
- union g { int a; }; typedef struct cc { int a; } bbb;
- union aa { int a; }; typedef struct a { int a; } bb;
+ union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;
+ union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;
+ union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;
""")
- assert ffi.list_types() == (['b', 'bb', 'bbb'],
- ['a', 'cc', 'ccc'],
- ['aa', 'aaa', 'g'])
+ assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'],
+ ['CFFIa', 'CFFIcc', 'CFFIccc'],
+ ['CFFIaa', 'CFFIaaa', 'CFFIg'])
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
@@ -280,6 +280,14 @@
pass
with open("setup.py", "w") as f:
f.write("""if 1:
+ # https://bugs.python.org/issue23246
+ import sys
+ if sys.platform == 'win32':
+ try:
+ import setuptools
+ except ImportError:
+ pass
+
import cffi
ffi = cffi.FFI()
ffi.set_source("pack1.mymod", "/*code would be here*/")
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
@@ -80,8 +80,21 @@
# find a solution to that: we could hack sys.path inside the
# script run here, but we can't hack it in the same way in
# execute().
- output = self._run([sys.executable,
- os.path.join(local_dir, filename)])
+ pathname = os.path.join(path, filename)
+ with open(pathname, 'w') as g:
+ g.write('''
+# https://bugs.python.org/issue23246
+import sys
+if sys.platform == 'win32':
+ try:
+ import setuptools
+ except ImportError:
+ pass
+''')
+ with open(os.path.join(local_dir, filename), 'r') as f:
+ g.write(f.read())
+
+ output = self._run([sys.executable, pathname])
match = re.compile(r"\bFILENAME: (.+)").search(output)
assert match
dynamic_lib_name = match.group(1)
diff --git a/pypy/module/test_lib_pypy/cffi_tests/udir.py b/pypy/module/test_lib_pypy/cffi_tests/udir.py
--- a/pypy/module/test_lib_pypy/cffi_tests/udir.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/udir.py
@@ -1,4 +1,14 @@
# Generated by pypy/tool/import_cffi.py
import py
+import sys
udir = py.path.local.make_numbered_dir(prefix = 'ffi-')
+
+
+# Windows-only workaround for some configurations: see
+# https://bugs.python.org/issue23246 (Python 2.7.9)
+if sys.platform == 'win32':
+ try:
+ import setuptools
+ except ImportError:
+ pass
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -484,7 +484,12 @@
return None
def view_as_kwargs(self, w_dict):
- if type(w_dict) is W_DictObject:
+ # Tries to return (keys_list, values_list), or (None, None) if
+ # it fails. It can fail on some dict implementations, so don't
+ # rely on it. For dict subclasses, though, it never fails;
+ # this emulates CPython's behavior which often won't call
+ # custom __iter__() or keys() methods in dict subclasses.
+ if isinstance(w_dict, W_DictObject):
return w_dict.view_as_kwargs()
return (None, None)
diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
--- a/pypy/tool/release/repackage.sh
+++ b/pypy/tool/release/repackage.sh
@@ -3,7 +3,7 @@
min=1
rev=1
branchname=release-$maj.x # ==OR== release-$maj.$min.x
-tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev
+tagname=release-$maj.$min.$rev # ==OR== release-$maj.$min
hg log -r $branchname || exit 1
hg log -r $tagname || exit 1
diff --git a/rpython/jit/metainterp/optimizeopt/intutils.py b/rpython/jit/metainterp/optimizeopt/intutils.py
--- a/rpython/jit/metainterp/optimizeopt/intutils.py
+++ b/rpython/jit/metainterp/optimizeopt/intutils.py
@@ -1,5 +1,8 @@
+import sys
from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, maxint, is_valid_int
from rpython.rlib.objectmodel import we_are_translated
+from rpython.rtyper.lltypesystem import lltype
+from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.jit.metainterp.resoperation import rop, ResOperation
from rpython.jit.metainterp.optimizeopt.info import AbstractInfo, INFO_NONNULL,\
INFO_UNKNOWN, INFO_NULL
@@ -174,15 +177,13 @@
def div_bound(self, other):
if self.has_upper and self.has_lower and \
other.has_upper and other.has_lower and \
- not other.contains(0):
- try:
- vals = (ovfcheck(self.upper / other.upper),
- ovfcheck(self.upper / other.lower),
- ovfcheck(self.lower / other.upper),
- ovfcheck(self.lower / other.lower))
- return IntBound(min4(vals), max4(vals))
- except OverflowError:
- return IntUnbounded()
+ not other.contains(0) and self.lower > (-sys.maxint-1):
+ vals = (
+ llop.int_floordiv(lltype.Signed, self.upper, other.upper),
+ llop.int_floordiv(lltype.Signed, self.upper, other.lower),
+ llop.int_floordiv(lltype.Signed, self.lower, other.upper),
+ llop.int_floordiv(lltype.Signed, self.lower, other.lower))
+ return IntBound(min4(vals), max4(vals))
else:
return IntUnbounded()
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py
@@ -240,6 +240,8 @@
def test_div_bound():
+ from rpython.rtyper.lltypesystem import lltype
+ from rpython.rtyper.lltypesystem.lloperation import llop
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.div_bound(b2)
@@ -247,7 +249,8 @@
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
if n2 != 0:
- assert b3.contains(n1 / n2)
+ assert b3.contains(
+ llop.int_floordiv(lltype.Signed, n1, n2))
a=bound(2, 4).div_bound(bound(1, 2))
assert not a.contains(0)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -5529,6 +5529,27 @@
"""
self.optimize_loop(ops, expected)
+ def test_division_bound_bug(self):
+ ops = """
+ [i4]
+ i1 = int_ge(i4, -50)
+ guard_true(i1) []
+ i2 = int_le(i4, -40)
+ guard_true(i2) []
+ # here, -50 <= i4 <= -40
+
+ i5 = int_floordiv(i4, 30)
+ # here, we know that i5 == -1 (C-style handling of negatives!)
+ escape_n(i5)
+ jump(i4)
+ """
+ expected = """
+ [i4, i5]
+ escape_n(-1)
+ jump(i4, -1)
+ """
+ self.optimize_loop(ops, expected)
+
def test_subsub_ovf(self):
ops = """
[i0]
diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py
--- a/rpython/rlib/runicode.py
+++ b/rpython/rlib/runicode.py
@@ -989,8 +989,6 @@
return result.build(), pos
-# Specialize on the errorhandler when it's a constant
- at specialize.arg_or_var(4)
def str_decode_ascii(s, size, errors, final=False,
errorhandler=None):
if errorhandler is None:
@@ -1020,8 +1018,6 @@
return result.build()
-# Specialize on the errorhandler when it's a constant
- at specialize.arg_or_var(3)
def unicode_encode_ucs1_helper(p, size, errors,
errorhandler=None, limit=256):
if errorhandler is None:
@@ -1064,12 +1060,10 @@
return result.build()
- at specialize.arg_or_var(3)
def unicode_encode_latin_1(p, size, errors, errorhandler=None):
res = unicode_encode_ucs1_helper(p, size, errors, errorhandler, 256)
return res
- at specialize.arg_or_var(3)
def unicode_encode_ascii(p, size, errors, errorhandler=None):
res = unicode_encode_ucs1_helper(p, size, errors, errorhandler, 128)
return res
@@ -1194,8 +1188,6 @@
builder.append(res)
return pos
-# Specialize on the errorhandler when it's a constant
- at specialize.arg_or_var(4)
def str_decode_unicode_escape(s, size, errors, final=False,
errorhandler=None,
unicodedata_handler=None):
diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py
--- a/rpython/translator/driver.py
+++ b/rpython/translator/driver.py
@@ -552,16 +552,16 @@
self.log.info('usession directory: %s' % (udir,))
return result
- @staticmethod
- def from_targetspec(targetspec_dic, config=None, args=None,
+ @classmethod
+ def from_targetspec(cls, targetspec_dic, config=None, args=None,
empty_translator=None,
disable=[],
default_goal=None):
if args is None:
args = []
- driver = TranslationDriver(config=config, default_goal=default_goal,
- disable=disable)
+ driver = cls(config=config, default_goal=default_goal,
+ disable=disable)
target = targetspec_dic['target']
spec = target(driver, args)
From pypy.commits at gmail.com Mon May 9 04:38:47 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 01:38:47 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: branch doc
Message-ID: <57304c97.c6bdc20a.11490.ffff99d1@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84325:ca14ff0b53ba
Date: 2016-05-09 10:38 +0200
http://bitbucket.org/pypy/pypy/changeset/ca14ff0b53ba/
Log: branch doc
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -84,3 +84,8 @@
.. branch: cpyext-more-slots
+.. branch: use-gc-del-3
+
+Use the new rgc.FinalizerQueue mechanism to clean up the handling of
+``__del__`` methods. Fixes notably issue #2287. (All RPython
+subclasses of W_Root need to use FinalizerQueue now.)
From pypy.commits at gmail.com Mon May 9 04:38:49 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 01:38:49 -0700 (PDT)
Subject: [pypy-commit] pypy use-gc-del-3: ready to merge
Message-ID: <57304c99.26b0c20a.ef1f4.ffffb70f@mx.google.com>
Author: Armin Rigo
Branch: use-gc-del-3
Changeset: r84326:0a4682d01440
Date: 2016-05-09 10:38 +0200
http://bitbucket.org/pypy/pypy/changeset/0a4682d01440/
Log: ready to merge
From pypy.commits at gmail.com Mon May 9 04:38:52 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 01:38:52 -0700 (PDT)
Subject: [pypy-commit] pypy default: hg merge use-gc-del-3
Message-ID: <57304c9c.a82cc20a.62e83.ffffbac4@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84327:c8b895fb3548
Date: 2016-05-09 10:39 +0200
http://bitbucket.org/pypy/pypy/changeset/c8b895fb3548/
Log: hg merge use-gc-del-3
Use the new rgc.FinalizerQueue mechanism to clean up the handling of
``__del__`` methods. Fixes notably issue #2287. (All RPython
subclasses of W_Root need to use FinalizerQueue now.)
diff too long, truncating to 2000 out of 2012 lines
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -33,26 +33,25 @@
it from a finalizer. A finalizer runs earlier, and in topological
order; care must be taken that the object might still be reachable at
this point if we're clever enough. A destructor on the other hand runs
-last; nothing can be done with the object any more.
+last; nothing can be done with the object any more, and the GC frees it
+immediately.
Destructors
-----------
A destructor is an RPython ``__del__()`` method that is called directly
-by the GC when there is no more reference to an object. Intended for
-objects that just need to free a block of raw memory or close a file.
+by the GC when it is about to free the memory. Intended for objects
+that just need to free an extra block of raw memory.
There are restrictions on the kind of code you can put in ``__del__()``,
including all other functions called by it. These restrictions are
-checked. In particular you cannot access fields containing GC objects;
-and if you call an external C function, it must be a "safe" function
-(e.g. not releasing the GIL; use ``releasegil=False`` in
-``rffi.llexternal()``).
+checked. In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
-If there are several objects with destructors that die during the same
-GC cycle, they are called in a completely random order --- but that
-should not matter because destructors cannot do much anyway.
+Destructors are called precisely when the GC frees the memory of the
+object. As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
Register_finalizer
@@ -95,10 +94,15 @@
To find the queued items, call ``fin.next_dead()`` repeatedly. It
returns the next queued item, or ``None`` when the queue is empty.
-It is allowed in theory to cumulate several different
+In theory, it would kind of work if you cumulate several different
``FinalizerQueue`` instances for objects of the same class, and
(always in theory) the same ``obj`` could be registered several times
in the same queue, or in several queues. This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
+
+Note that the Boehm garbage collector, used in ``rpython -O0``,
+completely ignores ``register_finalizer()``.
Ordering of finalizers
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -84,3 +84,8 @@
.. branch: cpyext-more-slots
+.. branch: use-gc-del-3
+
+Use the new rgc.FinalizerQueue mechanism to clean up the handling of
+``__del__`` methods. Fixes notably issue #2287. (All RPython
+subclasses of W_Root need to use FinalizerQueue now.)
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -11,7 +11,7 @@
INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
- UserDelAction)
+ make_finalizer_queue)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
@@ -28,6 +28,7 @@
"""This is the abstract root class of all wrapped objects that live
in a 'normal' object space like StdObjSpace."""
__slots__ = ('__weakref__',)
+ _must_be_light_finalizer_ = True
user_overridden_class = False
def getdict(self, space):
@@ -136,9 +137,8 @@
pass
def clear_all_weakrefs(self):
- """Call this at the beginning of interp-level __del__() methods
- in subclasses. It ensures that weakrefs (if any) are cleared
- before the object is further destroyed.
+ """Ensures that weakrefs (if any) are cleared now. This is
+ called by UserDelAction before the object is finalized further.
"""
lifeline = self.getweakref()
if lifeline is not None:
@@ -151,25 +151,37 @@
self.delweakref()
lifeline.clear_all_weakrefs()
- __already_enqueued_for_destruction = ()
+ def _finalize_(self):
+ """The RPython-level finalizer.
- def enqueue_for_destruction(self, space, callback, descrname):
- """Put the object in the destructor queue of the space.
- At a later, safe point in time, UserDelAction will call
- callback(self). If that raises OperationError, prints it
- to stderr with the descrname string.
+ By default, it is *not called*. See self.register_finalizer().
+ Be ready to handle the case where the object is only half
+ initialized. Also, in some cases the object might still be
+ visible to app-level after _finalize_() is called (e.g. if
+ there is a __del__ that resurrects).
+ """
- Note that 'callback' will usually need to start with:
- assert isinstance(self, W_SpecificClass)
+ def register_finalizer(self, space):
+ """Register a finalizer for this object, so that
+ self._finalize_() will be called. You must call this method at
+ most once. Be ready to handle in _finalize_() the case where
+ the object is half-initialized, even if you only call
+ self.register_finalizer() at the end of the initialization.
+ This is because there are cases where the finalizer is already
+ registered before: if the user makes an app-level subclass with
+ a __del__. (In that case only, self.register_finalizer() does
+ nothing, because the finalizer is already registered in
+ allocate_instance().)
"""
- # this function always resurect the object, so when
- # running on top of CPython we must manually ensure that
- # we enqueue it only once
- if not we_are_translated():
- if callback in self.__already_enqueued_for_destruction:
- return
- self.__already_enqueued_for_destruction += (callback,)
- space.user_del_action.register_callback(self, callback, descrname)
+ if self.user_overridden_class and self.getclass(space).hasuserdel:
+ # already registered by space.allocate_instance()
+ if not we_are_translated():
+ assert space.finalizer_queue._already_registered(self)
+ else:
+ if not we_are_translated():
+ # does not make sense if _finalize_ is not overridden
+ assert self._finalize_.im_func is not W_Root._finalize_.im_func
+ space.finalizer_queue.register_finalizer(self)
# hooks that the mapdict implementations needs:
def _get_mapdict_map(self):
@@ -389,9 +401,9 @@
self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
- self.user_del_action = UserDelAction(self)
+ make_finalizer_queue(W_Root, self)
self._code_of_sys_exc_info = None
-
+
# can be overridden to a subclass
self.initialize()
@@ -1844,7 +1856,6 @@
('get', 'get', 3, ['__get__']),
('set', 'set', 3, ['__set__']),
('delete', 'delete', 2, ['__delete__']),
- ('userdel', 'del', 1, ['__del__']),
]
ObjSpace.BuiltinModuleTable = [
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -2,7 +2,7 @@
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import specialize
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
TICK_COUNTER_STEP = 100
@@ -141,6 +141,12 @@
actionflag.action_dispatcher(self, frame) # slow path
bytecode_trace._always_inline_ = True
+ def _run_finalizers_now(self):
+ # Tests only: run the actions now, to ensure that the
+ # finalizable objects are really finalized. Used notably by
+ # pypy.tool.pytest.apptest.
+ self.space.actionflag.action_dispatcher(self, None)
+
def bytecode_only_trace(self, frame):
"""
Like bytecode_trace() but doesn't invoke any other events besides the
@@ -515,75 +521,98 @@
"""
-class UserDelCallback(object):
- def __init__(self, w_obj, callback, descrname):
- self.w_obj = w_obj
- self.callback = callback
- self.descrname = descrname
- self.next = None
-
class UserDelAction(AsyncAction):
"""An action that invokes all pending app-level __del__() method.
This is done as an action instead of immediately when the
- interp-level __del__() is invoked, because the latter can occur more
+ WRootFinalizerQueue is triggered, because the latter can occur more
or less anywhere in the middle of code that might not be happy with
random app-level code mutating data structures under its feet.
"""
def __init__(self, space):
AsyncAction.__init__(self, space)
- self.dying_objects = None
- self.dying_objects_last = None
- self.finalizers_lock_count = 0
- self.enabled_at_app_level = True
-
- def register_callback(self, w_obj, callback, descrname):
- cb = UserDelCallback(w_obj, callback, descrname)
- if self.dying_objects_last is None:
- self.dying_objects = cb
- else:
- self.dying_objects_last.next = cb
- self.dying_objects_last = cb
- self.fire()
+ self.finalizers_lock_count = 0 # see pypy/module/gc
+ self.enabled_at_app_level = True # see pypy/module/gc
+ self.pending_with_disabled_del = None
def perform(self, executioncontext, frame):
- if self.finalizers_lock_count > 0:
- return
self._run_finalizers()
+ @jit.dont_look_inside
def _run_finalizers(self):
- # Each call to perform() first grabs the self.dying_objects
- # and replaces it with an empty list. We do this to try to
- # avoid too deep recursions of the kind of __del__ being called
- # while in the middle of another __del__ call.
- pending = self.dying_objects
- self.dying_objects = None
- self.dying_objects_last = None
+ while True:
+ w_obj = self.space.finalizer_queue.next_dead()
+ if w_obj is None:
+ break
+ self._call_finalizer(w_obj)
+
+ def gc_disabled(self, w_obj):
+ # If we're running in 'gc.disable()' mode, record w_obj in the
+ # "call me later" list and return True. In normal mode, return
+ # False. Use this function from some _finalize_() methods:
+ # if a _finalize_() method would call some user-defined
+ # app-level function, like a weakref callback, then first do
+        # 'if gc_disabled(self): return'.  Another attempt at
+ # calling _finalize_() will be made after 'gc.enable()'.
+ # (The exact rule for when to use gc_disabled() or not is a bit
+ # vague, but most importantly this includes all user-level
+ # __del__().)
+ pdd = self.pending_with_disabled_del
+ if pdd is None:
+ return False
+ else:
+ pdd.append(w_obj)
+ return True
+
+ def _call_finalizer(self, w_obj):
+ # Before calling the finalizers, clear the weakrefs, if any.
+ w_obj.clear_all_weakrefs()
+
+ # Look up and call the app-level __del__, if any.
space = self.space
- while pending is not None:
+ if w_obj.typedef is None:
+ w_del = None # obscure case: for WeakrefLifeline
+ else:
+ w_del = space.lookup(w_obj, '__del__')
+ if w_del is not None:
+ if self.gc_disabled(w_obj):
+ return
try:
- pending.callback(pending.w_obj)
- except OperationError as e:
- e.write_unraisable(space, pending.descrname, pending.w_obj)
- e.clear(space) # break up reference cycles
- pending = pending.next
- #
- # Note: 'dying_objects' used to be just a regular list instead
- # of a chained list. This was the cause of "leaks" if we have a
- # program that constantly creates new objects with finalizers.
- # Here is why: say 'dying_objects' is a long list, and there
- # are n instances in it. Then we spend some time in this
- # function, possibly triggering more GCs, but keeping the list
- # of length n alive. Then the list is suddenly freed at the
- # end, and we return to the user program. At this point the
- # GC limit is still very high, because just before, there was
- # a list of length n alive. Assume that the program continues
- # to allocate a lot of instances with finalizers. The high GC
- # limit means that it could allocate a lot of instances before
- # reaching it --- possibly more than n. So the whole procedure
- # repeats with higher and higher values of n.
- #
- # This does not occur in the current implementation because
- # there is no list of length n: if n is large, then the GC
- # will run several times while walking the list, but it will
- # see lower and lower memory usage, with no lower bound of n.
+ space.get_and_call_function(w_del, w_obj)
+ except Exception as e:
+ report_error(space, e, "method __del__ of ", w_obj)
+
+ # Call the RPython-level _finalize_() method.
+ try:
+ w_obj._finalize_()
+ except Exception as e:
+ report_error(space, e, "finalizer of ", w_obj)
+
+
+def report_error(space, e, where, w_obj):
+ if isinstance(e, OperationError):
+ e.write_unraisable(space, where, w_obj)
+ e.clear(space) # break up reference cycles
+ else:
+ addrstring = w_obj.getaddrstring(space)
+ msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % (
+ str(e), where, space.type(w_obj).name, addrstring))
+ space.call_method(space.sys.get('stderr'), 'write',
+ space.wrap(msg))
+
+
+def make_finalizer_queue(W_Root, space):
+ """Make a FinalizerQueue subclass which responds to GC finalizer
+ events by 'firing' the UserDelAction class above. It does not
+    directly fetch the objects to finalize at all; they stay in the
+ GC-managed queue, and will only be fetched by UserDelAction
+ (between bytecodes)."""
+
+ class WRootFinalizerQueue(rgc.FinalizerQueue):
+ Class = W_Root
+
+ def finalizer_trigger(self):
+ space.user_del_action.fire()
+
+ space.user_del_action = UserDelAction(space)
+ space.finalizer_queue = WRootFinalizerQueue()
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,6 +1,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.pyopcode import LoopBlock
+from pypy.interpreter.pycode import CO_YIELD_INSIDE_TRY
from rpython.rlib import jit
@@ -13,6 +14,8 @@
self.frame = frame # turned into None when frame_finished_execution
self.pycode = frame.pycode
self.running = False
+ if self.pycode.co_flags & CO_YIELD_INSIDE_TRY:
+ self.register_finalizer(self.space)
def descr__repr__(self, space):
if self.pycode is None:
@@ -139,7 +142,6 @@
def descr_close(self):
"""x.close(arg) -> raise GeneratorExit inside generator."""
- assert isinstance(self, GeneratorIterator)
space = self.space
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
@@ -212,25 +214,21 @@
unpack_into = _create_unpack_into()
unpack_into_w = _create_unpack_into()
-
-class GeneratorIteratorWithDel(GeneratorIterator):
-
- def __del__(self):
- # Only bother enqueuing self to raise an exception if the frame is
- # still not finished and finally or except blocks are present.
- self.clear_all_weakrefs()
+ def _finalize_(self):
+ # This is only called if the CO_YIELD_INSIDE_TRY flag is set
+ # on the code object. If the frame is still not finished and
+ # finally or except blocks are present at the current
+ # position, then raise a GeneratorExit. Otherwise, there is
+ # no point.
if self.frame is not None:
block = self.frame.lastblock
while block is not None:
if not isinstance(block, LoopBlock):
- self.enqueue_for_destruction(self.space,
- GeneratorIterator.descr_close,
- "interrupting generator of ")
+ self.descr_close()
break
block = block.previous
-
def get_printable_location_genentry(bytecode):
return '%s ' % (bytecode.get_repr(),)
generatorentry_driver = jit.JitDriver(greens=['pycode'],
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -241,12 +241,8 @@
def run(self):
"""Start this frame's execution."""
if self.getcode().co_flags & pycode.CO_GENERATOR:
- if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY:
- from pypy.interpreter.generator import GeneratorIteratorWithDel
- return self.space.wrap(GeneratorIteratorWithDel(self))
- else:
- from pypy.interpreter.generator import GeneratorIterator
- return self.space.wrap(GeneratorIterator(self))
+ from pypy.interpreter.generator import GeneratorIterator
+ return self.space.wrap(GeneratorIterator(self))
else:
return self.execute_frame()
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -127,10 +127,7 @@
""" % (slots, methodname, checks[0], checks[1],
checks[2], checks[3]))
subclasses = {}
- for key, subcls in typedef._subclass_cache.items():
- if key[0] is not space.config:
- continue
- cls = key[1]
+ for cls, subcls in typedef._unique_subclass_cache.items():
subclasses.setdefault(cls, {})
prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls)
assert subcls is prevsubcls
@@ -186,35 +183,20 @@
class W_Level1(W_Root):
def __init__(self, space1):
assert space1 is space
- def __del__(self):
+ self.register_finalizer(space)
+ def _finalize_(self):
space.call_method(w_seen, 'append', space.wrap(1))
- class W_Level2(W_Root):
- def __init__(self, space1):
- assert space1 is space
- def __del__(self):
- self.enqueue_for_destruction(space, W_Level2.destructormeth,
- 'FOO ')
- def destructormeth(self):
- space.call_method(w_seen, 'append', space.wrap(2))
W_Level1.typedef = typedef.TypeDef(
'level1',
__new__ = typedef.generic_new_descr(W_Level1))
- W_Level2.typedef = typedef.TypeDef(
- 'level2',
- __new__ = typedef.generic_new_descr(W_Level2))
#
w_seen = space.newlist([])
W_Level1(space)
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [1]
- #
- w_seen = space.newlist([])
- W_Level2(space)
- gc.collect(); gc.collect()
assert space.str_w(space.repr(w_seen)) == "[]" # not called yet
ec = space.getexecutioncontext()
self.space.user_del_action.perform(ec, None)
- assert space.unwrap(w_seen) == [2]
+ assert space.unwrap(w_seen) == [1] # called by user_del_action
#
w_seen = space.newlist([])
self.space.appexec([self.space.gettypeobject(W_Level1.typedef)],
@@ -236,29 +218,17 @@
A4()
""")
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [4, 1]
+ assert space.unwrap(w_seen) == [4, 1] # user __del__, and _finalize_
#
w_seen = space.newlist([])
- self.space.appexec([self.space.gettypeobject(W_Level2.typedef)],
+ self.space.appexec([self.space.gettypeobject(W_Level1.typedef)],
"""(level2):
class A5(level2):
pass
A5()
""")
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [2]
- #
- w_seen = space.newlist([])
- self.space.appexec([self.space.gettypeobject(W_Level2.typedef),
- w_seen],
- """(level2, seen):
- class A6(level2):
- def __del__(self):
- seen.append(6)
- A6()
- """)
- gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [6, 2]
+ assert space.unwrap(w_seen) == [1] # _finalize_ only
def test_multiple_inheritance(self):
class W_A(W_Root):
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -24,6 +24,8 @@
self.bases = bases
self.heaptype = False
self.hasdict = '__dict__' in rawdict
+ # no __del__: use an RPython _finalize_() method and register_finalizer
+ assert '__del__' not in rawdict
self.weakrefable = '__weakref__' in rawdict
self.doc = rawdict.pop('__doc__', None)
for base in bases:
@@ -103,26 +105,20 @@
# we need two subclasses of the app-level type, one to add mapdict, and then one
# to add del to not slow down the GC.
-def get_unique_interplevel_subclass(space, cls, needsdel=False):
+def get_unique_interplevel_subclass(space, cls):
"NOT_RPYTHON: initialization-time only"
- if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False):
- needsdel = False
assert cls.typedef.acceptable_as_base_class
- key = space, cls, needsdel
try:
- return _subclass_cache[key]
+ return _unique_subclass_cache[cls]
except KeyError:
- # XXX can save a class if cls already has a __del__
- if needsdel:
- cls = get_unique_interplevel_subclass(space, cls, False)
- subcls = _getusercls(space, cls, needsdel)
- assert key not in _subclass_cache
- _subclass_cache[key] = subcls
+ subcls = _getusercls(cls)
+ assert cls not in _unique_subclass_cache
+ _unique_subclass_cache[cls] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
-_subclass_cache = {}
+_unique_subclass_cache = {}
-def _getusercls(space, cls, wants_del, reallywantdict=False):
+def _getusercls(cls, reallywantdict=False):
from rpython.rlib import objectmodel
from pypy.objspace.std.objectobject import W_ObjectObject
from pypy.module.__builtin__.interp_classobj import W_InstanceObject
@@ -132,11 +128,10 @@
typedef = cls.typedef
name = cls.__name__ + "User"
- mixins_needed = []
if cls is W_ObjectObject or cls is W_InstanceObject:
- mixins_needed.append(_make_storage_mixin_size_n())
+ base_mixin = _make_storage_mixin_size_n()
else:
- mixins_needed.append(MapdictStorageMixin)
+ base_mixin = MapdictStorageMixin
copy_methods = [BaseUserClassMapdict]
if reallywantdict or not typedef.hasdict:
# the type has no dict, mapdict to provide the dict
@@ -147,44 +142,12 @@
# support
copy_methods.append(MapdictWeakrefSupport)
name += "Weakrefable"
- if wants_del:
- # This subclass comes with an app-level __del__. To handle
- # it, we make an RPython-level __del__ method. This
- # RPython-level method is called directly by the GC and it
- # cannot do random things (calling the app-level __del__ would
- # be "random things"). So instead, we just call here
- # enqueue_for_destruction(), and the app-level __del__ will be
- # called later at a safe point (typically between bytecodes).
- # If there is also an inherited RPython-level __del__, it is
- # called afterwards---not immediately! This base
- # RPython-level __del__ is supposed to run only when the
- # object is not reachable any more. NOTE: it doesn't fully
- # work: see issue #2287.
- name += "Del"
- parent_destructor = getattr(cls, '__del__', None)
- def call_parent_del(self):
- assert isinstance(self, subcls)
- parent_destructor(self)
- def call_applevel_del(self):
- assert isinstance(self, subcls)
- space.userdel(self)
- class Proto(object):
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(space, call_applevel_del,
- 'method __del__ of ')
- if parent_destructor is not None:
- self.enqueue_for_destruction(space, call_parent_del,
- 'internal destructor of ')
- mixins_needed.append(Proto)
class subcls(cls):
user_overridden_class = True
- for base in mixins_needed:
- objectmodel.import_from_mixin(base)
+ objectmodel.import_from_mixin(base_mixin)
for copycls in copy_methods:
_copy_methods(copycls, subcls)
- del subcls.base
subcls.__name__ = name
return subcls
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -44,13 +44,12 @@
self.bases_w = bases
self.w_dict = w_dict
+ def has_user_del(self, space):
+ return self.lookup(space, '__del__') is not None
+
def instantiate(self, space):
cache = space.fromcache(Cache)
- if self.lookup(space, '__del__') is not None:
- w_inst = cache.cls_with_del(space, self)
- else:
- w_inst = cache.cls_without_del(space, self)
- return w_inst
+ return cache.InstanceObjectCls(space, self)
def getdict(self, space):
return self.w_dict
@@ -132,9 +131,9 @@
self.setbases(space, w_value)
return
elif name == "__del__":
- if self.lookup(space, name) is None:
+ if not self.has_user_del(space):
msg = ("a __del__ method added to an existing class will "
- "not be called")
+ "only be called on instances made from now on")
space.warn(space.wrap(msg), space.w_RuntimeWarning)
space.setitem(self.w_dict, w_attr, w_value)
@@ -184,14 +183,11 @@
if hasattr(space, 'is_fake_objspace'):
# hack: with the fake objspace, we don't want to see typedef's
# _getusercls() at all
- self.cls_without_del = W_InstanceObject
- self.cls_with_del = W_InstanceObject
+ self.InstanceObjectCls = W_InstanceObject
return
- self.cls_without_del = _getusercls(
- space, W_InstanceObject, False, reallywantdict=True)
- self.cls_with_del = _getusercls(
- space, W_InstanceObject, True, reallywantdict=True)
+ self.InstanceObjectCls = _getusercls(
+ W_InstanceObject, reallywantdict=True)
def class_descr_call(space, w_self, __args__):
@@ -297,12 +293,15 @@
class W_InstanceObject(W_Root):
def __init__(self, space, w_class):
# note that user_setup is overridden by the typedef.py machinery
+ self.space = space
self.user_setup(space, space.gettypeobject(self.typedef))
assert isinstance(w_class, W_ClassObject)
self.w_class = w_class
+ if w_class.has_user_del(space):
+ space.finalizer_queue.register_finalizer(self)
def user_setup(self, space, w_subtype):
- self.space = space
+ pass
def set_oldstyle_class(self, space, w_class):
if w_class is None or not isinstance(w_class, W_ClassObject):
@@ -368,8 +367,7 @@
self.set_oldstyle_class(space, w_value)
return
if name == '__del__' and w_meth is None:
- cache = space.fromcache(Cache)
- if (not isinstance(self, cache.cls_with_del)
+ if (not self.w_class.has_user_del(space)
and self.getdictvalue(space, '__del__') is None):
msg = ("a __del__ method added to an instance with no "
"__del__ in the class will not be called")
@@ -646,13 +644,14 @@
raise oefmt(space.w_TypeError, "instance has no next() method")
return space.call_function(w_func)
- def descr_del(self, space):
- # Note that this is called from executioncontext.UserDelAction
- # via the space.userdel() method.
+ def _finalize_(self):
+ space = self.space
w_func = self.getdictvalue(space, '__del__')
if w_func is None:
w_func = self.getattr_from_class(space, '__del__')
if w_func is not None:
+ if self.space.user_del_action.gc_disabled(self):
+ return
space.call_function(w_func)
def descr_exit(self, space, w_type, w_value, w_tb):
@@ -729,7 +728,6 @@
__pow__ = interp2app(W_InstanceObject.descr_pow),
__rpow__ = interp2app(W_InstanceObject.descr_rpow),
next = interp2app(W_InstanceObject.descr_next),
- __del__ = interp2app(W_InstanceObject.descr_del),
__exit__ = interp2app(W_InstanceObject.descr_exit),
__dict__ = dict_descr,
**rawdict
diff --git a/pypy/module/_cffi_backend/allocator.py b/pypy/module/_cffi_backend/allocator.py
--- a/pypy/module/_cffi_backend/allocator.py
+++ b/pypy/module/_cffi_backend/allocator.py
@@ -45,14 +45,11 @@
rffi.c_memset(rffi.cast(rffi.VOIDP, ptr), 0,
rffi.cast(rffi.SIZE_T, datasize))
#
- if self.w_free is None:
- # use this class which does not have a __del__, but still
- # keeps alive w_raw_cdata
- res = cdataobj.W_CDataNewNonStdNoFree(space, ptr, ctype, length)
- else:
- res = cdataobj.W_CDataNewNonStdFree(space, ptr, ctype, length)
+ res = cdataobj.W_CDataNewNonStd(space, ptr, ctype, length)
+ res.w_raw_cdata = w_raw_cdata
+ if self.w_free is not None:
res.w_free = self.w_free
- res.w_raw_cdata = w_raw_cdata
+ res.register_finalizer(space)
return res
@unwrap_spec(w_init=WrappedDefault(None))
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -449,22 +449,11 @@
lltype.free(self._ptr, flavor='raw')
-class W_CDataNewNonStdNoFree(W_CDataNewOwning):
- """Subclass using a non-standard allocator, no free()"""
- _attrs_ = ['w_raw_cdata']
+class W_CDataNewNonStd(W_CDataNewOwning):
+ """Subclass using a non-standard allocator"""
+ _attrs_ = ['w_raw_cdata', 'w_free']
-class W_CDataNewNonStdFree(W_CDataNewNonStdNoFree):
- """Subclass using a non-standard allocator, with a free()"""
- _attrs_ = ['w_free']
-
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space,
- W_CDataNewNonStdFree.call_destructor,
- 'destructor of ')
-
- def call_destructor(self):
- assert isinstance(self, W_CDataNewNonStdFree)
+ def _finalize_(self):
self.space.call_function(self.w_free, self.w_raw_cdata)
@@ -552,14 +541,9 @@
W_CData.__init__(self, space, cdata, ctype)
self.w_original_cdata = w_original_cdata
self.w_destructor = w_destructor
+ self.register_finalizer(space)
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space, W_CDataGCP.call_destructor,
- 'destructor of ')
-
- def call_destructor(self):
- assert isinstance(self, W_CDataGCP)
+ def _finalize_(self):
w_destructor = self.w_destructor
if w_destructor is not None:
self.w_destructor = None
diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py
--- a/pypy/module/_cffi_backend/cdlopen.py
+++ b/pypy/module/_cffi_backend/cdlopen.py
@@ -25,10 +25,13 @@
raise wrap_dlopenerror(ffi.space, e, filename)
W_LibObject.__init__(self, ffi, filename)
self.libhandle = handle
+ self.register_finalizer(ffi.space)
- def __del__(self):
- if self.libhandle:
- dlclose(self.libhandle)
+ def _finalize_(self):
+ h = self.libhandle
+ if h != rffi.cast(DLLHANDLE, 0):
+ self.libhandle = rffi.cast(DLLHANDLE, 0)
+ dlclose(h)
def cdlopen_fetch(self, name):
if not self.libhandle:
diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py
--- a/pypy/module/_cffi_backend/libraryobj.py
+++ b/pypy/module/_cffi_backend/libraryobj.py
@@ -15,7 +15,6 @@
class W_Library(W_Root):
_immutable_ = True
- handle = rffi.cast(DLLHANDLE, 0)
def __init__(self, space, filename, flags):
self.space = space
@@ -27,8 +26,9 @@
except DLOpenError as e:
raise wrap_dlopenerror(space, e, filename)
self.name = filename
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
h = self.handle
if h != rffi.cast(DLLHANDLE, 0):
self.handle = rffi.cast(DLLHANDLE, 0)
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -43,22 +43,18 @@
def __init__(self, space):
self.space = space
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
# assume that the file and stream objects are only visible in the
- # thread that runs __del__, so no race condition should be possible
- self.clear_all_weakrefs()
+ # thread that runs _finalize_, so no race condition should be
+ # possible and no locking is done here.
if self.stream is not None:
- self.enqueue_for_destruction(self.space, W_File.destructor,
- 'close() method of ')
-
- def destructor(self):
- assert isinstance(self, W_File)
- try:
- self.direct_close()
- except StreamErrors as e:
- operr = wrap_streamerror(self.space, e, self.w_name)
- raise operr
+ try:
+ self.direct_close()
+ except StreamErrors as e:
+ operr = wrap_streamerror(self.space, e, self.w_name)
+ raise operr
def fdopenstream(self, stream, fd, mode, w_name=None):
self.fd = fd
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -76,11 +76,14 @@
except:
lltype.free(ctx, flavor='raw')
raise
+ self.register_finalizer(space)
- def __del__(self):
- if self.ctx:
- ropenssl.EVP_MD_CTX_cleanup(self.ctx)
- lltype.free(self.ctx, flavor='raw')
+ def _finalize_(self):
+ ctx = self.ctx
+ if ctx:
+ self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO)
+ ropenssl.EVP_MD_CTX_cleanup(ctx)
+ lltype.free(ctx, flavor='raw')
def digest_type_by_name(self, space):
digest_type = ropenssl.EVP_get_digestbyname(self.name)
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -952,9 +952,15 @@
self.w_writer = None
raise
- def __del__(self):
- self.clear_all_weakrefs()
+ def _finalize_(self):
# Don't call the base __del__: do not close the files!
+ # Usually the _finalize_() method is not called at all because
+ # we set 'needs_to_finalize = False' in this class, so
+ # W_IOBase.__init__() won't call register_finalizer().
+ # However, this method might still be called: if the user
+ # makes an app-level subclass and adds a custom __del__.
+ pass
+ needs_to_finalize = False
# forward to reader
for method in ['read', 'peek', 'read1', 'readinto', 'readable']:
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -59,6 +59,8 @@
self.__IOBase_closed = False
if add_to_autoflusher:
get_autoflusher(space).add(self)
+ if self.needs_to_finalize:
+ self.register_finalizer(space)
def getdict(self, space):
return self.w_dict
@@ -71,13 +73,7 @@
return True
return False
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space, W_IOBase.destructor,
- 'internal __del__ of ')
-
- def destructor(self):
- assert isinstance(self, W_IOBase)
+ def _finalize_(self):
space = self.space
w_closed = space.findattr(self, space.wrap('closed'))
try:
@@ -90,6 +86,7 @@
# equally as bad, and potentially more frequent (because of
# shutdown issues).
pass
+ needs_to_finalize = True
def _CLOSED(self):
# Use this macro whenever you want to check the internal `closed`
diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py
--- a/pypy/module/_multibytecodec/interp_incremental.py
+++ b/pypy/module/_multibytecodec/interp_incremental.py
@@ -20,8 +20,9 @@
self.codec = codec.codec
self.name = codec.name
self._initialize()
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
self._free()
def reset_w(self):
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -40,14 +40,17 @@
BUFFER_SIZE = 1024
buffer = lltype.nullptr(rffi.CCHARP.TO)
- def __init__(self, flags):
+ def __init__(self, space, flags):
self.flags = flags
self.buffer = lltype.malloc(rffi.CCHARP.TO, self.BUFFER_SIZE,
flavor='raw')
+ self.register_finalizer(space)
- def __del__(self):
- if self.buffer:
- lltype.free(self.buffer, flavor='raw')
+ def _finalize_(self):
+ buf = self.buffer
+ if buf:
+ self.buffer = lltype.nullptr(rffi.CCHARP.TO)
+ lltype.free(buf, flavor='raw')
try:
self.do_close()
except OSError:
@@ -242,7 +245,7 @@
def __init__(self, space, fd, flags):
if fd == self.INVALID_HANDLE_VALUE or fd < 0:
raise oefmt(space.w_IOError, "invalid handle %d", fd)
- W_BaseConnection.__init__(self, flags)
+ W_BaseConnection.__init__(self, space, flags)
self.fd = fd
@unwrap_spec(fd=int, readable=bool, writable=bool)
@@ -363,8 +366,8 @@
if sys.platform == 'win32':
from rpython.rlib.rwin32 import INVALID_HANDLE_VALUE
- def __init__(self, handle, flags):
- W_BaseConnection.__init__(self, flags)
+ def __init__(self, space, handle, flags):
+ W_BaseConnection.__init__(self, space, flags)
self.handle = handle
@unwrap_spec(readable=bool, writable=bool)
@@ -375,7 +378,7 @@
flags = (readable and READABLE) | (writable and WRITABLE)
self = space.allocate_instance(W_PipeConnection, w_subtype)
- W_PipeConnection.__init__(self, handle, flags)
+ W_PipeConnection.__init__(self, space, handle, flags)
return space.wrap(self)
def descr_repr(self, space):
diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py
--- a/pypy/module/_multiprocessing/interp_semaphore.py
+++ b/pypy/module/_multiprocessing/interp_semaphore.py
@@ -430,11 +430,12 @@
class W_SemLock(W_Root):
- def __init__(self, handle, kind, maxvalue):
+ def __init__(self, space, handle, kind, maxvalue):
self.handle = handle
self.kind = kind
self.count = 0
self.maxvalue = maxvalue
+ self.register_finalizer(space)
def kind_get(self, space):
return space.newint(self.kind)
@@ -508,7 +509,7 @@
@unwrap_spec(kind=int, maxvalue=int)
def rebuild(space, w_cls, w_handle, kind, maxvalue):
self = space.allocate_instance(W_SemLock, w_cls)
- self.__init__(handle_w(space, w_handle), kind, maxvalue)
+ self.__init__(space, handle_w(space, w_handle), kind, maxvalue)
return space.wrap(self)
def enter(self, space):
@@ -517,7 +518,7 @@
def exit(self, space, __args__):
self.release(space)
- def __del__(self):
+ def _finalize_(self):
delete_semaphore(self.handle)
@unwrap_spec(kind=int, value=int, maxvalue=int)
@@ -534,7 +535,7 @@
raise wrap_oserror(space, e)
self = space.allocate_instance(W_SemLock, w_subtype)
- self.__init__(handle, kind, maxvalue)
+ self.__init__(space, handle, kind, maxvalue)
return space.wrap(self)
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py
--- a/pypy/module/_pickle_support/maker.py
+++ b/pypy/module/_pickle_support/maker.py
@@ -4,7 +4,7 @@
from pypy.interpreter.function import Function, Method
from pypy.interpreter.module import Module
from pypy.interpreter.pytraceback import PyTraceback
-from pypy.interpreter.generator import GeneratorIteratorWithDel
+from pypy.interpreter.generator import GeneratorIterator
from rpython.rlib.objectmodel import instantiate
from pypy.interpreter.gateway import unwrap_spec
from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject
@@ -59,7 +59,7 @@
return space.wrap(tb)
def generator_new(space):
- new_generator = instantiate(GeneratorIteratorWithDel)
+ new_generator = instantiate(GeneratorIterator)
return space.wrap(new_generator)
@unwrap_spec(current=int, remaining=int, step=int)
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -278,6 +278,8 @@
sock_fd = space.int_w(space.call_method(w_sock, "fileno"))
self.ssl = libssl_SSL_new(w_ctx.ctx) # new ssl struct
+ self.register_finalizer(space)
+
index = compute_unique_id(self)
libssl_SSL_set_app_data(self.ssl, rffi.cast(rffi.VOIDP, index))
SOCKET_STORAGE.set(index, self)
@@ -317,16 +319,15 @@
self.ssl_sock_weakref_w = None
return self
- def __del__(self):
- self.enqueue_for_destruction(self.space, _SSLSocket.destructor,
- '__del__() method of ')
-
- def destructor(self):
- assert isinstance(self, _SSLSocket)
- if self.peer_cert:
- libssl_X509_free(self.peer_cert)
- if self.ssl:
- libssl_SSL_free(self.ssl)
+ def _finalize_(self):
+ peer_cert = self.peer_cert
+ if peer_cert:
+ self.peer_cert = lltype.nullptr(X509.TO)
+ libssl_X509_free(peer_cert)
+ ssl = self.ssl
+ if ssl:
+ self.ssl = lltype.nullptr(SSL.TO)
+ libssl_SSL_free(ssl)
@unwrap_spec(data='bufferstr')
def write(self, space, data):
@@ -1285,6 +1286,7 @@
self = space.allocate_instance(_SSLContext, w_subtype)
self.ctx = ctx
self.check_hostname = False
+ self.register_finalizer(space)
options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
if protocol != PY_SSL_VERSION_SSL2:
options |= SSL_OP_NO_SSLv2
@@ -1308,8 +1310,11 @@
return self
- def __del__(self):
- libssl_SSL_CTX_free(self.ctx)
+ def _finalize_(self):
+ ctx = self.ctx
+ if ctx:
+ self.ctx = lltype.nullptr(SSL_CTX.TO)
+ libssl_SSL_CTX_free(ctx)
@unwrap_spec(server_side=int)
def descr_wrap_socket(self, space, w_sock, server_side, w_server_hostname=None, w_ssl_sock=None):
diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py
--- a/pypy/module/_weakref/interp__weakref.py
+++ b/pypy/module/_weakref/interp__weakref.py
@@ -3,7 +3,8 @@
from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import interp2app, ObjSpace
from pypy.interpreter.typedef import TypeDef
-from rpython.rlib import jit
+from pypy.interpreter.executioncontext import AsyncAction, report_error
+from rpython.rlib import jit, rgc
from rpython.rlib.rshrinklist import AbstractShrinkList
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rweakref import dead_ref
@@ -16,9 +17,12 @@
class WeakrefLifeline(W_Root):
+ typedef = None
+
cached_weakref = None
cached_proxy = None
other_refs_weak = None
+ has_callbacks = False
def __init__(self, space):
self.space = space
@@ -99,31 +103,10 @@
return w_ref
return space.w_None
-
-class WeakrefLifelineWithCallbacks(WeakrefLifeline):
-
- def __init__(self, space, oldlifeline=None):
- self.space = space
- if oldlifeline is not None:
- self.cached_weakref = oldlifeline.cached_weakref
- self.cached_proxy = oldlifeline.cached_proxy
- self.other_refs_weak = oldlifeline.other_refs_weak
-
- def __del__(self):
- """This runs when the interp-level object goes away, and allows
- its lifeline to go away. The purpose of this is to activate the
- callbacks even if there is no __del__ method on the interp-level
- W_Root subclass implementing the object.
- """
- if self.other_refs_weak is None:
- return
- items = self.other_refs_weak.items()
- for i in range(len(items)-1, -1, -1):
- w_ref = items[i]()
- if w_ref is not None and w_ref.w_callable is not None:
- w_ref.enqueue_for_destruction(self.space,
- W_WeakrefBase.activate_callback,
- 'weakref callback of ')
+ def enable_callbacks(self):
+ if not self.has_callbacks:
+ self.space.finalizer_queue.register_finalizer(self)
+ self.has_callbacks = True
@jit.dont_look_inside
def make_weakref_with_callback(self, w_subtype, w_obj, w_callable):
@@ -131,6 +114,7 @@
w_ref = space.allocate_instance(W_Weakref, w_subtype)
W_Weakref.__init__(w_ref, space, w_obj, w_callable)
self.append_wref_to(w_ref)
+ self.enable_callbacks()
return w_ref
@jit.dont_look_inside
@@ -141,8 +125,33 @@
else:
w_proxy = W_Proxy(space, w_obj, w_callable)
self.append_wref_to(w_proxy)
+ self.enable_callbacks()
return w_proxy
+ def _finalize_(self):
+ """This is called at the end, if enable_callbacks() was invoked.
+ It activates the callbacks.
+ """
+ if self.other_refs_weak is None:
+ return
+ #
+ # If this is set, then we're in the 'gc.disable()' mode. In that
+ # case, don't invoke the callbacks now.
+ if self.space.user_del_action.gc_disabled(self):
+ return
+ #
+ items = self.other_refs_weak.items()
+ self.other_refs_weak = None
+ for i in range(len(items)-1, -1, -1):
+ w_ref = items[i]()
+ if w_ref is not None and w_ref.w_callable is not None:
+ try:
+ w_ref.activate_callback()
+ except Exception as e:
+ report_error(self.space, e,
+ "weakref callback ", w_ref.w_callable)
+
+
# ____________________________________________________________
@@ -163,7 +172,6 @@
self.w_obj_weak = dead_ref
def activate_callback(w_self):
- assert isinstance(w_self, W_WeakrefBase)
w_self.space.call_function(w_self.w_callable, w_self)
def descr__repr__(self, space):
@@ -227,32 +235,16 @@
w_obj.setweakref(space, lifeline)
return lifeline
-def getlifelinewithcallbacks(space, w_obj):
- lifeline = w_obj.getweakref()
- if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None
- oldlifeline = lifeline
- lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline)
- w_obj.setweakref(space, lifeline)
- return lifeline
-
-
-def get_or_make_weakref(space, w_subtype, w_obj):
- return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj)
-
-
-def make_weakref_with_callback(space, w_subtype, w_obj, w_callable):
- lifeline = getlifelinewithcallbacks(space, w_obj)
- return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable)
-
def descr__new__weakref(space, w_subtype, w_obj, w_callable=None,
__args__=None):
if __args__.arguments_w:
raise oefmt(space.w_TypeError, "__new__ expected at most 2 arguments")
+ lifeline = getlifeline(space, w_obj)
if space.is_none(w_callable):
- return get_or_make_weakref(space, w_subtype, w_obj)
+ return lifeline.get_or_make_weakref(w_subtype, w_obj)
else:
- return make_weakref_with_callback(space, w_subtype, w_obj, w_callable)
+ return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable)
W_Weakref.typedef = TypeDef("weakref",
__doc__ = """A weak reference to an object 'obj'. A 'callback' can be given,
@@ -308,23 +300,15 @@
return space.call_args(w_obj, __args__)
-def get_or_make_proxy(space, w_obj):
- return getlifeline(space, w_obj).get_or_make_proxy(w_obj)
-
-
-def make_proxy_with_callback(space, w_obj, w_callable):
- lifeline = getlifelinewithcallbacks(space, w_obj)
- return lifeline.make_proxy_with_callback(w_obj, w_callable)
-
-
def proxy(space, w_obj, w_callable=None):
"""Create a proxy object that weakly references 'obj'.
'callback', if given, is called with the proxy as an argument when 'obj'
is about to be finalized."""
+ lifeline = getlifeline(space, w_obj)
if space.is_none(w_callable):
- return get_or_make_proxy(space, w_obj)
+ return lifeline.get_or_make_proxy(w_obj)
else:
- return make_proxy_with_callback(space, w_obj, w_callable)
+ return lifeline.make_proxy_with_callback(w_obj, w_callable)
def descr__new__proxy(space, w_subtype, w_obj, w_callable=None):
raise oefmt(space.w_TypeError, "cannot create 'weakproxy' instances")
@@ -345,7 +329,7 @@
proxy_typedef_dict = {}
callable_proxy_typedef_dict = {}
-special_ops = {'repr': True, 'userdel': True, 'hash': True}
+special_ops = {'repr': True, 'hash': True}
for opname, _, arity, special_methods in ObjSpace.MethodTable:
if opname in special_ops or not special_methods:
diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py
--- a/pypy/module/_weakref/test/test_weakref.py
+++ b/pypy/module/_weakref/test/test_weakref.py
@@ -1,6 +1,9 @@
class AppTestWeakref(object):
spaceconfig = dict(usemodules=('_weakref',))
-
+
+ def setup_class(cls):
+ cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
+
def test_simple(self):
import _weakref, gc
class A(object):
@@ -287,6 +290,9 @@
assert a1 is None
def test_del_and_callback_and_id(self):
+ if not self.runappdirect:
+ skip("the id() doesn't work correctly in __del__ and "
+ "callbacks before translation")
import gc, weakref
seen_del = []
class A(object):
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -518,8 +518,14 @@
def __init__(self, space, compresslevel):
self.space = space
self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True)
- self.running = False
- self._init_bz2comp(compresslevel)
+ try:
+ self.running = False
+ self._init_bz2comp(compresslevel)
+ except:
+ lltype.free(self.bzs, flavor='raw')
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ raise
+ self.register_finalizer(space)
def _init_bz2comp(self, compresslevel):
if compresslevel < 1 or compresslevel > 9:
@@ -532,9 +538,12 @@
self.running = True
- def __del__(self):
- BZ2_bzCompressEnd(self.bzs)
- lltype.free(self.bzs, flavor='raw')
+ def _finalize_(self):
+ bzs = self.bzs
+ if bzs:
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ BZ2_bzCompressEnd(bzs)
+ lltype.free(bzs, flavor='raw')
@unwrap_spec(data='bufferstr')
def compress(self, data):
@@ -621,10 +630,16 @@
self.space = space
self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True)
- self.running = False
- self.unused_data = ""
+ try:
+ self.running = False
+ self.unused_data = ""
- self._init_bz2decomp()
+ self._init_bz2decomp()
+ except:
+ lltype.free(self.bzs, flavor='raw')
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ raise
+ self.register_finalizer(space)
def _init_bz2decomp(self):
bzerror = BZ2_bzDecompressInit(self.bzs, 0, 0)
@@ -633,9 +648,12 @@
self.running = True
- def __del__(self):
- BZ2_bzDecompressEnd(self.bzs)
- lltype.free(self.bzs, flavor='raw')
+ def _finalize_(self):
+ bzs = self.bzs
+ if bzs:
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ BZ2_bzDecompressEnd(bzs)
+ lltype.free(bzs, flavor='raw')
@unwrap_spec(data='bufferstr')
def decompress(self, data):
diff --git a/pypy/module/bz2/test/support.py b/pypy/module/bz2/test/support.py
--- a/pypy/module/bz2/test/support.py
+++ b/pypy/module/bz2/test/support.py
@@ -10,5 +10,6 @@
#
while tries and ll2ctypes.ALLOCATED:
gc.collect() # to make sure we disallocate buffers
+ self.space.getexecutioncontext()._run_finalizers_now()
tries -= 1
assert not ll2ctypes.ALLOCATED
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -1020,9 +1020,12 @@
class W_CPPInstance(W_Root):
- _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns']
+ _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns',
+ 'finalizer_registered']
_immutable_fields_ = ["cppclass", "isref"]
+ finalizer_registered = False
+
def __init__(self, space, cppclass, rawobject, isref, python_owns):
self.space = space
self.cppclass = cppclass
@@ -1032,6 +1035,12 @@
assert not isref or not python_owns
self.isref = isref
self.python_owns = python_owns
+ self._opt_register_finalizer()
+
+ def _opt_register_finalizer(self):
+ if self.python_owns and not self.finalizer_registered:
+ self.register_finalizer(self.space)
+ self.finalizer_registered = True
def _nullcheck(self):
if not self._rawobject or (self.isref and not self.get_rawobject()):
@@ -1045,6 +1054,7 @@
@unwrap_spec(value=bool)
def fset_python_owns(self, space, value):
self.python_owns = space.is_true(value)
+ self._opt_register_finalizer()
def get_cppthis(self, calling_scope):
return self.cppclass.get_cppthis(self, calling_scope)
@@ -1143,16 +1153,14 @@
(self.cppclass.name, rffi.cast(rffi.ULONG, self.get_rawobject())))
def destruct(self):
- assert isinstance(self, W_CPPInstance)
if self._rawobject and not self.isref:
memory_regulator.unregister(self)
capi.c_destruct(self.space, self.cppclass, self._rawobject)
self._rawobject = capi.C_NULL_OBJECT
- def __del__(self):
+ def _finalize_(self):
if self.python_owns:
- self.enqueue_for_destruction(self.space, W_CPPInstance.destruct,
- '__del__() method of ')
+ self.destruct()
W_CPPInstance.typedef = TypeDef(
'CPPInstance',
diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py
--- a/pypy/module/gc/interp_gc.py
+++ b/pypy/module/gc/interp_gc.py
@@ -38,13 +38,23 @@
return space.newbool(space.user_del_action.enabled_at_app_level)
def enable_finalizers(space):
- if space.user_del_action.finalizers_lock_count == 0:
+ uda = space.user_del_action
+ if uda.finalizers_lock_count == 0:
raise oefmt(space.w_ValueError, "finalizers are already enabled")
- space.user_del_action.finalizers_lock_count -= 1
- space.user_del_action.fire()
+ uda.finalizers_lock_count -= 1
+ if uda.finalizers_lock_count == 0:
+ pending = uda.pending_with_disabled_del
+ uda.pending_with_disabled_del = None
+ if pending is not None:
+ for i in range(len(pending)):
+ uda._call_finalizer(pending[i])
+ pending[i] = None # clear the list as we progress
def disable_finalizers(space):
- space.user_del_action.finalizers_lock_count += 1
+ uda = space.user_del_action
+ uda.finalizers_lock_count += 1
+ if uda.pending_with_disabled_del is None:
+ uda.pending_with_disabled_del = []
# ____________________________________________________________
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -3,7 +3,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from pypy.interpreter.argument import Arguments
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
from rpython.rlib.rarithmetic import LONG_BIT, maxint, _get_bitsize
from rpython.tool.sourcetools import func_with_new_name
from rpython.rlib.rawstorage import (
@@ -1534,6 +1534,7 @@
self.steps = alloc_raw_storage(0, track_allocation=False)
self.dims_steps_set = False
+ @rgc.must_be_light_finalizer
def __del__(self):
free_raw_storage(self.dims, track_allocation=False)
free_raw_storage(self.steps, track_allocation=False)
diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py
--- a/pypy/module/pyexpat/interp_pyexpat.py
+++ b/pypy/module/pyexpat/interp_pyexpat.py
@@ -421,8 +421,11 @@
class W_XMLParserType(W_Root):
+ id = -1
+
def __init__(self, space, parser, w_intern):
self.itself = parser
+ self.register_finalizer(space)
self.w_intern = w_intern
@@ -444,14 +447,17 @@
CallbackData(space, self))
XML_SetUserData(self.itself, rffi.cast(rffi.VOIDP, self.id))
- def __del__(self):
+ def _finalize_(self):
if XML_ParserFree: # careful with CPython interpreter shutdown
- XML_ParserFree(self.itself)
- if global_storage:
+ if self.itself:
+ XML_ParserFree(self.itself)
+ self.itself = lltype.nullptr(XML_Parser.TO)
+ if global_storage and self.id >= 0:
try:
global_storage.free_nonmoving_id(self.id)
except KeyError:
pass # maybe global_storage.clear() was already called
+ self.id = -1
@unwrap_spec(flag=int)
def SetParamEntityParsing(self, space, flag):
diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
--- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
@@ -28,10 +28,10 @@
p65 = getfield_gc_r(p14, descr=)
guard_value(p65, ConstPtr(ptr45), descr=...)
p66 = getfield_gc_r(p14, descr=)
- guard_nonnull_class(p66, ..., descr=...)
+ guard_nonnull(p66, descr=...)
p67 = force_token()
setfield_gc(p0, p67, descr=)
- p68 = call_may_force_r(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=)
+ p68 = call_may_force_r(ConstClass(WeakrefLifeline.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
guard_nonnull_class(p68, ..., descr=...)
diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py
--- a/pypy/module/select/interp_epoll.py
+++ b/pypy/module/select/interp_epoll.py
@@ -80,6 +80,7 @@
class W_Epoll(W_Root):
def __init__(self, space, epfd):
self.epfd = epfd
+ self.register_finalizer(space)
@unwrap_spec(sizehint=int)
def descr__new__(space, w_subtype, sizehint=-1):
@@ -98,7 +99,7 @@
def descr_fromfd(space, w_cls, fd):
return space.wrap(W_Epoll(space, fd))
- def __del__(self):
+ def _finalize_(self):
self.close()
def check_closed(self, space):
diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py
--- a/pypy/module/select/interp_kqueue.py
+++ b/pypy/module/select/interp_kqueue.py
@@ -109,6 +109,7 @@
class W_Kqueue(W_Root):
def __init__(self, space, kqfd):
self.kqfd = kqfd
+ self.register_finalizer(space)
def descr__new__(space, w_subtype):
kqfd = syscall_kqueue()
@@ -120,7 +121,7 @@
def descr_fromfd(space, w_cls, fd):
return space.wrap(W_Kqueue(space, fd))
- def __del__(self):
+ def _finalize_(self):
self.close()
def get_closed(self):
diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py
--- a/pypy/module/zlib/interp_zlib.py
+++ b/pypy/module/zlib/interp_zlib.py
@@ -148,8 +148,9 @@
raise zlib_error(space, e.msg)
except ValueError:
raise oefmt(space.w_ValueError, "Invalid initialization option")
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
"""Automatically free the resources used by the stream."""
if self.stream:
rzlib.deflateEnd(self.stream)
@@ -258,8 +259,9 @@
raise zlib_error(space, e.msg)
except ValueError:
raise oefmt(space.w_ValueError, "Invalid initialization option")
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
"""Automatically free the resources used by the stream."""
if self.stream:
rzlib.inflateEnd(self.stream)
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -440,11 +440,6 @@
raise oefmt(space.w_TypeError,
"__hash__() should return an int or long")
- def userdel(space, w_obj):
- w_del = space.lookup(w_obj, '__del__')
- if w_del is not None:
- space.get_and_call_function(w_del, w_obj)
-
def cmp(space, w_v, w_w):
if space.is_w(w_v, w_w):
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -357,11 +357,12 @@
if cls.typedef.applevel_subclasses_base is not None:
cls = cls.typedef.applevel_subclasses_base
#
- subcls = get_unique_interplevel_subclass(
- self, cls, w_subtype.needsdel)
+ subcls = get_unique_interplevel_subclass(self, cls)
instance = instantiate(subcls)
assert isinstance(instance, cls)
instance.user_setup(self, w_subtype)
+ if w_subtype.hasuserdel:
+ self.finalizer_queue.register_finalizer(instance)
else:
raise oefmt(self.w_TypeError,
"%N.__new__(%N): only for the type %N",
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -132,7 +132,7 @@
"flag_sequence_bug_compat",
"flag_map_or_seq", # '?' or 'M' or 'S'
"compares_by_identity_status?",
- 'needsdel',
+ 'hasuserdel',
'weakrefable',
'hasdict',
'layout',
@@ -160,7 +160,7 @@
w_self.bases_w = bases_w
w_self.dict_w = dict_w
w_self.hasdict = False
- w_self.needsdel = False
+ w_self.hasuserdel = False
w_self.weakrefable = False
w_self.w_doc = space.w_None
w_self.weak_subclasses = []
@@ -289,7 +289,7 @@
# compute a tuple that fully describes the instance layout
def get_full_instance_layout(w_self):
layout = w_self.layout
- return (layout, w_self.hasdict, w_self.needsdel, w_self.weakrefable)
+ return (layout, w_self.hasdict, w_self.weakrefable)
def compute_default_mro(w_self):
return compute_C3_mro(w_self.space, w_self)
@@ -986,7 +986,7 @@
hasoldstylebase = True
continue
w_self.hasdict = w_self.hasdict or w_base.hasdict
- w_self.needsdel = w_self.needsdel or w_base.needsdel
+ w_self.hasuserdel = w_self.hasuserdel or w_base.hasuserdel
w_self.weakrefable = w_self.weakrefable or w_base.weakrefable
return hasoldstylebase
@@ -1028,7 +1028,7 @@
if wantweakref:
create_weakref_slot(w_self)
if '__del__' in dict_w:
- w_self.needsdel = True
+ w_self.hasuserdel = True
#
if index_next_extra_slot == base_layout.nslots and not force_new_layout:
return base_layout
diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py
--- a/pypy/tool/pytest/apptest.py
+++ b/pypy/tool/pytest/apptest.py
@@ -7,7 +7,7 @@
# ...unless the -A option ('runappdirect') is passed.
import py
-import sys, textwrap, types
+import sys, textwrap, types, gc
from pypy.interpreter.gateway import app2interp_temp
from pypy.interpreter.error import OperationError
from pypy.interpreter.function import Method
@@ -32,6 +32,7 @@
return traceback
def execute_appex(self, space, target, *args):
+ self.space = space
try:
target(*args)
except OperationError as e:
@@ -64,6 +65,13 @@
code = getattr(func, 'im_func', func).func_code
return "[%s:%s]" % (code.co_filename, code.co_firstlineno)
+ def track_allocations_collect(self):
+ gc.collect()
+ # must also invoke finalizers now; UserDelAction
+ # would not run at all unless invoked explicitly
+ if hasattr(self, 'space'):
+ self.space.getexecutioncontext()._run_finalizers_now()
+
class AppTestMethod(AppTestFunction):
def setup(self):
diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py
--- a/rpython/annotator/classdesc.py
+++ b/rpython/annotator/classdesc.py
@@ -579,6 +579,14 @@
if cls not in FORCE_ATTRIBUTES_INTO_CLASSES:
self.all_enforced_attrs = [] # no attribute allowed
+ if (getattr(cls, '_must_be_light_finalizer_', False) and
+ hasattr(cls, '__del__') and
+ not getattr(cls.__del__, '_must_be_light_finalizer_', False)):
+ raise AnnotatorError(
+ "Class %r is in a class hierarchy with "
+ "_must_be_light_finalizer_ = True: it cannot have a "
+ "finalizer without @rgc.must_be_light_finalizer" % (cls,))
+
def add_source_attribute(self, name, value, mixin=False):
if isinstance(value, property):
# special case for property object
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4584,6 +4584,32 @@
e = py.test.raises(Exception, a.build_types, f, [])
assert str(e.value) == "Don't know how to represent Ellipsis"
+ def test_must_be_light_finalizer(self):
+ from rpython.rlib import rgc
+ @rgc.must_be_light_finalizer
+ class A(object):
+ pass
+ class B(A):
+ def __del__(self):
+ pass
+ class C(A):
+ @rgc.must_be_light_finalizer
+ def __del__(self):
+ pass
+ class D(object):
+ def __del__(self):
+ pass
+ def fb():
+ B()
+ def fc():
+ C()
+ def fd():
+ D()
+ a = self.RPythonAnnotator()
+ a.build_types(fc, [])
+ a.build_types(fd, [])
+ py.test.raises(AnnotatorError, a.build_types, fb, [])
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/conftest.py b/rpython/conftest.py
--- a/rpython/conftest.py
+++ b/rpython/conftest.py
@@ -82,7 +82,13 @@
return
if (not getattr(item.obj, 'dont_track_allocations', False)
and leakfinder.TRACK_ALLOCATIONS):
- item._pypytest_leaks = leakfinder.stop_tracking_allocations(False)
+ kwds = {}
+ try:
+ kwds['do_collection'] = item.track_allocations_collect
+ except AttributeError:
+ pass
+ item._pypytest_leaks = leakfinder.stop_tracking_allocations(False,
+ **kwds)
else: # stop_tracking_allocations() already called
item._pypytest_leaks = None
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -362,6 +362,16 @@
return func
def must_be_light_finalizer(func):
+ """Mark a __del__ method as being a destructor, calling only a limited
+ set of operations. See pypy/doc/discussion/finalizer-order.rst.
+
+ If you use the same decorator on a class, this class and all its
+ subclasses are only allowed to have __del__ methods which are
+ similarly decorated (or no __del__ at all). It prevents a class
+ hierarchy from having destructors in some parent classes, which are
+ overridden in subclasses with (non-light, old-style) finalizers.
+ (This case is the original motivation for FinalizerQueue.)
+ """
func._must_be_light_finalizer_ = True
return func
@@ -383,6 +393,7 @@
return True
@specialize.arg(0)
+ @jit.dont_look_inside
def next_dead(self):
if we_are_translated():
from rpython.rtyper.lltypesystem.lloperation import llop
@@ -397,6 +408,7 @@
return None
@specialize.arg(0)
+ @jit.dont_look_inside
def register_finalizer(self, obj):
assert isinstance(obj, self.Class)
if we_are_translated():
@@ -418,9 +430,11 @@
self._weakrefs = set()
self._queue = collections.deque()
+ def _already_registered(self, obj):
+ return hasattr(obj, '__enable_del_for_id')
+
def _untranslated_register_finalizer(self, obj):
- if hasattr(obj, '__enable_del_for_id'):
- return # already called
+ assert not self._already_registered(obj)
if not hasattr(self, '_queue'):
self._reset()
@@ -428,14 +442,16 @@
# Fetch and check the type of 'obj'
objtyp = obj.__class__
assert isinstance(objtyp, type), (
- "to run register_finalizer() untranslated, "
- "the object's class must be new-style")
+ "%r: to run register_finalizer() untranslated, "
+ "the object's class must be new-style" % (obj,))
assert hasattr(obj, '__dict__'), (
- "to run register_finalizer() untranslated, "
- "the object must have a __dict__")
- assert not hasattr(obj, '__slots__'), (
- "to run register_finalizer() untranslated, "
- "the object must not have __slots__")
+ "%r: to run register_finalizer() untranslated, "
+ "the object must have a __dict__" % (obj,))
+ assert (not hasattr(obj, '__slots__') or
+ type(obj).__slots__ == () or
+ type(obj).__slots__ == ('__weakref__',)), (
+ "%r: to run register_finalizer() untranslated, "
+ "the object must not have __slots__" % (obj,))
# The first time, patch the method __del__ of the class, if
# any, so that we can disable it on the original 'obj' and
diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py
--- a/rpython/rlib/test/test_rgc.py
+++ b/rpython/rlib/test/test_rgc.py
@@ -327,8 +327,6 @@
fq = SimpleFQ()
w = T_Del2(42)
fq.register_finalizer(w)
- fq.register_finalizer(w)
- fq.register_finalizer(w)
del w
fq.register_finalizer(T_Del1(21))
gc.collect(); gc.collect()
diff --git a/rpython/tool/leakfinder.py b/rpython/tool/leakfinder.py
--- a/rpython/tool/leakfinder.py
+++ b/rpython/tool/leakfinder.py
@@ -37,13 +37,13 @@
ALLOCATED.clear()
return result
-def stop_tracking_allocations(check, prev=None):
+def stop_tracking_allocations(check, prev=None, do_collection=gc.collect):
global TRACK_ALLOCATIONS
assert TRACK_ALLOCATIONS
for i in range(5):
if not ALLOCATED:
break
- gc.collect()
+ do_collection()
result = ALLOCATED.copy()
ALLOCATED.clear()
if prev is None:
diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h
--- a/rpython/translator/c/src/mem.h
+++ b/rpython/translator/c/src/mem.h
@@ -109,6 +109,9 @@
#define OP_GC__ENABLE_FINALIZERS(r) (boehm_gc_finalizer_lock--, \
boehm_gc_finalizer_notifier())
+#define OP_GC_FQ_REGISTER(tag, obj, r) /* ignored so far */
+#define OP_GC_FQ_NEXT_DEAD(tag, r) (r = NULL)
+
#endif /* PYPY_USING_BOEHM_GC */
@@ -121,6 +124,8 @@
#define GC_REGISTER_FINALIZER(a, b, c, d, e) /* nothing */
#define GC_gcollect() /* nothing */
#define GC_set_max_heap_size(a) /* nothing */
+#define OP_GC_FQ_REGISTER(tag, obj, r) /* nothing */
+#define OP_GC_FQ_NEXT_DEAD(tag, r) (r = NULL)
#endif
/************************************************************/
diff --git a/rpython/translator/c/test/test_boehm.py b/rpython/translator/c/test/test_boehm.py
--- a/rpython/translator/c/test/test_boehm.py
+++ b/rpython/translator/c/test/test_boehm.py
@@ -2,7 +2,7 @@
import py
-from rpython.rlib import rgc
+from rpython.rlib import rgc, debug
from rpython.rlib.objectmodel import (keepalive_until_here, compute_unique_id,
compute_hash, current_object_addr_as_int)
from rpython.rtyper.lltypesystem import lltype, llmemory
@@ -392,3 +392,23 @@
assert res[2] != compute_hash(c) # likely
assert res[3] == compute_hash(d)
assert res[4] == compute_hash(("Hi", None, (7.5, 2, d)))
+
+ def test_finalizer_queue_is_at_least_ignored(self):
+ class A(object):
+ pass
+ class FQ(rgc.FinalizerQueue):
+ Class = A
+ def finalizer_trigger(self):
+ debug.debug_print("hello!") # not called so far
From pypy.commits at gmail.com Mon May 9 04:54:24 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 01:54:24 -0700 (PDT)
Subject: [pypy-commit] pypy default: For binary compatibility with PyPy 5.1
Message-ID: <57305040.d72d1c0a.4dc63.ffffac43@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84328:c86b42dd7613
Date: 2016-05-09 10:53 +0200
http://bitbucket.org/pypy/pypy/changeset/c86b42dd7613/
Log: For binary compatibility with PyPy 5.1
diff --git a/pypy/module/cpyext/src/abstract.c b/pypy/module/cpyext/src/abstract.c
--- a/pypy/module/cpyext/src/abstract.c
+++ b/pypy/module/cpyext/src/abstract.c
@@ -326,3 +326,9 @@
return tmp;
}
+/* for binary compatibility with 5.1 */
+PyAPI_FUNC(void) PyPyObject_Del(PyObject *);
+void PyPyObject_Del(PyObject *op)
+{
+ PyObject_FREE(op);
+}
From pypy.commits at gmail.com Mon May 9 05:44:55 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 09 May 2016 02:44:55 -0700 (PDT)
Subject: [pypy-commit] pypy new-jit-log: merged default
Message-ID: <57305c17.697ac20a.8c526.ffffdec5@mx.google.com>
Author: Richard Plangger
Branch: new-jit-log
Changeset: r84329:d46d42219c06
Date: 2016-05-09 10:13 +0200
http://bitbucket.org/pypy/pypy/changeset/d46d42219c06/
Log: merged default
diff too long, truncating to 2000 out of 39303 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,3 +20,5 @@
5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
if __name__ == '__main__':
if len(sys.argv) != 2:
+ if len(sys.argv) == 1:
+ # start locally
+ import sshgraphserver
+ sshgraphserver.ssh_graph_server(['LOCAL'])
+ sys.exit(0)
print >> sys.stderr, __doc__
sys.exit(2)
if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
Usage:
sshgraphserver.py hostname [more args for ssh...]
+ sshgraphserver.py LOCAL
This logs in to 'hostname' by passing the arguments on the command-line
to ssh. No further configuration is required: it works for all programs
using the dotviewer library as long as they run on 'hostname' under the
same username as the one sshgraphserver logs as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
"""
import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
s1 = socket.socket()
s1.bind(('127.0.0.1', socket.INADDR_ANY))
localhost, localport = s1.getsockname()
- remoteport = random.randrange(10000, 20000)
- # ^^^ and just hope there is no conflict
- args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
- args = args + sshargs + ['python -u -c "exec input()"']
- print ' '.join(args[:-1])
+ if sshargs[0] != 'LOCAL':
+ remoteport = random.randrange(10000, 20000)
+ # ^^^ and just hope there is no conflict
+
+ args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+ remoteport, localport)]
+ args = args + sshargs + ['python -u -c "exec input()"']
+ else:
+ remoteport = localport
+ args = ['python', '-u', '-c', 'exec input()']
+
+ print ' '.join(args)
p = subprocess.Popen(args, bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "next" : stop}),
- ("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
("__format__", format, format_impl, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
+ if test_support.check_impl_detail():
+ specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
raise MyException
for name, runner, meth_impl, ok, env in specials:
- if name == '__length_hint__' or name == '__sizeof__':
- if not test_support.check_impl_detail():
- continue
-
class X(Checker):
pass
for attr, obj in env.iteritems():
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
+ 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after ` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create a integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -320,8 +320,7 @@
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
- def __hash__(self):
- raise TypeError("deque objects are unhashable")
+ __hash__ = None
def __copy__(self):
return self.__class__(self, self.maxlen)
diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py
--- a/lib_pypy/_pypy_wait.py
+++ b/lib_pypy/_pypy_wait.py
@@ -1,51 +1,22 @@
-from resource import _struct_rusage, struct_rusage
-from ctypes import CDLL, c_int, POINTER, byref
-from ctypes.util import find_library
+from resource import ffi, lib, _make_struct_rusage
__all__ = ["wait3", "wait4"]
-libc = CDLL(find_library("c"))
-c_wait3 = libc.wait3
-c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait3.restype = c_int
-
-c_wait4 = libc.wait4
-c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait4.restype = c_int
-
-def create_struct_rusage(c_struct):
- return struct_rusage((
- float(c_struct.ru_utime),
- float(c_struct.ru_stime),
- c_struct.ru_maxrss,
- c_struct.ru_ixrss,
- c_struct.ru_idrss,
- c_struct.ru_isrss,
- c_struct.ru_minflt,
- c_struct.ru_majflt,
- c_struct.ru_nswap,
- c_struct.ru_inblock,
- c_struct.ru_oublock,
- c_struct.ru_msgsnd,
- c_struct.ru_msgrcv,
- c_struct.ru_nsignals,
- c_struct.ru_nvcsw,
- c_struct.ru_nivcsw))
def wait3(options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait3(byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait3(status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
def wait4(pid, options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait4(pid, status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_resource_build.py
@@ -0,0 +1,118 @@
+from cffi import FFI
+
+ffi = FFI()
+
+# Note: we don't directly expose 'struct timeval' or 'struct rlimit'
+
+
+rlimit_consts = '''
+RLIMIT_CPU
+RLIMIT_FSIZE
+RLIMIT_DATA
+RLIMIT_STACK
+RLIMIT_CORE
+RLIMIT_NOFILE
+RLIMIT_OFILE
+RLIMIT_VMEM
+RLIMIT_AS
+RLIMIT_RSS
+RLIMIT_NPROC
+RLIMIT_MEMLOCK
+RLIMIT_SBSIZE
+RLIM_INFINITY
+RUSAGE_SELF
+RUSAGE_CHILDREN
+RUSAGE_BOTH
+'''.split()
+
+rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s)
+ for s in rlimit_consts]
+
+
+ffi.set_source("_resource_cffi", """
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+
+static const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[] = {
+$RLIMIT_CONSTS
+ { NULL, 0 }
+};
+
+#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001)
+
+static double my_utime(struct rusage *input)
+{
+ return doubletime(input->ru_utime);
+}
+
+static double my_stime(struct rusage *input)
+{
+ return doubletime(input->ru_stime);
+}
+
+static int my_getrlimit(int resource, long long result[2])
+{
+ struct rlimit rl;
+ if (getrlimit(resource, &rl) == -1)
+ return -1;
+ result[0] = rl.rlim_cur;
+ result[1] = rl.rlim_max;
+ return 0;
+}
+
+static int my_setrlimit(int resource, long long cur, long long max)
+{
+ struct rlimit rl;
+ rl.rlim_cur = cur & RLIM_INFINITY;
+ rl.rlim_max = max & RLIM_INFINITY;
+ return setrlimit(resource, &rl);
+}
+
+""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts)))
+
+
+ffi.cdef("""
+
+#define RLIM_NLIMITS ...
+
+const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[];
+
+struct rusage {
+ long ru_maxrss;
+ long ru_ixrss;
+ long ru_idrss;
+ long ru_isrss;
+ long ru_minflt;
+ long ru_majflt;
+ long ru_nswap;
+ long ru_inblock;
+ long ru_oublock;
+ long ru_msgsnd;
+ long ru_msgrcv;
+ long ru_nsignals;
+ long ru_nvcsw;
+ long ru_nivcsw;
+ ...;
+};
+
+static double my_utime(struct rusage *);
+static double my_stime(struct rusage *);
+void getrusage(int who, struct rusage *result);
+int my_getrlimit(int resource, long long result[2]);
+int my_setrlimit(int resource, long long cur, long long max);
+
+int wait3(int *status, int options, struct rusage *rusage);
+int wait4(int pid, int *status, int options, struct rusage *rusage);
+""")
+
+
+if __name__ == "__main__":
+ ffi.compile()
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -397,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
@@ -993,6 +998,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -29,7 +29,8 @@
_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
_r_cdecl = re.compile(r"\b__cdecl\b")
-_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')
+_r_extern_python = re.compile(r'\bextern\s*"'
+ r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
_r_star_const_space = re.compile( # matches "* const "
r"[*]\s*((const|volatile|restrict)\b\s*)+")
@@ -88,6 +89,12 @@
# void __cffi_extern_python_start;
# int foo(int);
# void __cffi_extern_python_stop;
+ #
+ # input: `extern "Python+C" int foo(int);`
+ # output:
+ # void __cffi_extern_python_plus_c_start;
+ # int foo(int);
+ # void __cffi_extern_python_stop;
parts = []
while True:
match = _r_extern_python.search(csource)
@@ -98,7 +105,10 @@
#print ''.join(parts)+csource
#print '=>'
parts.append(csource[:match.start()])
- parts.append('void __cffi_extern_python_start; ')
+ if 'C' in match.group(1):
+ parts.append('void __cffi_extern_python_plus_c_start; ')
+ else:
+ parts.append('void __cffi_extern_python_start; ')
if csource[endpos] == '{':
# grouping variant
closing = csource.find('}', endpos)
@@ -302,7 +312,7 @@
break
#
try:
- self._inside_extern_python = False
+ self._inside_extern_python = '__cffi_extern_python_stop'
for decl in iterator:
if isinstance(decl, pycparser.c_ast.Decl):
self._parse_decl(decl)
@@ -376,8 +386,10 @@
tp = self._get_type_pointer(tp, quals)
if self._options.get('dllexport'):
tag = 'dllexport_python '
- elif self._inside_extern_python:
+ elif self._inside_extern_python == '__cffi_extern_python_start':
tag = 'extern_python '
+ elif self._inside_extern_python == '__cffi_extern_python_plus_c_start':
+ tag = 'extern_python_plus_c '
else:
tag = 'function '
self._declare(tag + decl.name, tp)
@@ -421,11 +433,9 @@
# hack: `extern "Python"` in the C source is replaced
# with "void __cffi_extern_python_start;" and
# "void __cffi_extern_python_stop;"
- self._inside_extern_python = not self._inside_extern_python
- assert self._inside_extern_python == (
- decl.name == '__cffi_extern_python_start')
+ self._inside_extern_python = decl.name
else:
- if self._inside_extern_python:
+ if self._inside_extern_python !='__cffi_extern_python_stop':
raise api.CDefError(
"cannot declare constants or "
"variables with 'extern \"Python\"'")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -1145,11 +1145,11 @@
def _generate_cpy_extern_python_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
self._do_collect_type(tp)
+ _generate_cpy_dllexport_python_collecttype = \
+ _generate_cpy_extern_python_plus_c_collecttype = \
+ _generate_cpy_extern_python_collecttype
- def _generate_cpy_dllexport_python_collecttype(self, tp, name):
- self._generate_cpy_extern_python_collecttype(tp, name)
-
- def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False):
+ def _extern_python_decl(self, tp, name, tag_and_space):
prnt = self._prnt
if isinstance(tp.result, model.VoidType):
size_of_result = '0'
@@ -1184,11 +1184,7 @@
size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
tp.result.get_c_name(''), size_of_a,
tp.result.get_c_name(''), size_of_a)
- if dllexport:
- tag = 'CFFI_DLLEXPORT'
- else:
- tag = 'static'
- prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments)))
+ prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
prnt('{')
prnt(' char a[%s];' % size_of_a)
prnt(' char *p = a;')
@@ -1206,8 +1202,14 @@
prnt()
self._num_externpy += 1
+ def _generate_cpy_extern_python_decl(self, tp, name):
+ self._extern_python_decl(tp, name, 'static ')
+
def _generate_cpy_dllexport_python_decl(self, tp, name):
- self._generate_cpy_extern_python_decl(tp, name, dllexport=True)
+ self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
+
+ def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
+ self._extern_python_decl(tp, name, '')
def _generate_cpy_extern_python_ctx(self, tp, name):
if self.target_is_python:
@@ -1220,8 +1222,9 @@
self._lsts["global"].append(
GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
- def _generate_cpy_dllexport_python_ctx(self, tp, name):
- self._generate_cpy_extern_python_ctx(tp, name)
+ _generate_cpy_dllexport_python_ctx = \
+ _generate_cpy_extern_python_plus_c_ctx = \
+ _generate_cpy_extern_python_ctx
def _string_literal(self, s):
def _char_repr(c):
diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty
new file mode 100644
--- /dev/null
+++ b/lib_pypy/ctypes_config_cache/.empty
@@ -0,0 +1,1 @@
+dummy file to allow old buildbot configuration to run
diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py
deleted file mode 100644
diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/dumpcache.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import sys, os
-from ctypes_configure import dumpcache
-
-def dumpcache2(basename, config):
- size = 32 if sys.maxint <= 2**32 else 64
- filename = '_%s_%s_.py' % (basename, size)
- dumpcache.dumpcache(__file__, filename, config)
- #
- filename = os.path.join(os.path.dirname(__file__),
- '_%s_cache.py' % (basename,))
- g = open(filename, 'w')
- print >> g, '''\
-import sys
-_size = 32 if sys.maxint <= 2**32 else 64
-# XXX relative import, should be removed together with
-# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib
-_mod = __import__("_%s_%%s_" %% (_size,),
- globals(), locals(), ["*"])
-globals().update(_mod.__dict__)\
-''' % (basename,)
- g.close()
diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/locale.ctc.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-'ctypes_configure' source for _locale.py.
-Run this to rebuild _locale_cache.py.
-"""
-
-from ctypes_configure.configure import (configure, ExternalCompilationInfo,
- ConstantInteger, DefinedConstantInteger, SimpleType, check_eci)
-import dumpcache
-
-# ____________________________________________________________
-
-_CONSTANTS = [
- 'LC_CTYPE',
- 'LC_TIME',
- 'LC_COLLATE',
- 'LC_MONETARY',
- 'LC_MESSAGES',
- 'LC_NUMERIC',
- 'LC_ALL',
- 'CHAR_MAX',
-]
-
-class LocaleConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['limits.h',
- 'locale.h'])
-for key in _CONSTANTS:
- setattr(LocaleConfigure, key, DefinedConstantInteger(key))
-
-config = configure(LocaleConfigure, noerr=True)
-for key, value in config.items():
- if value is None:
- del config[key]
- _CONSTANTS.remove(key)
-
-# ____________________________________________________________
-
-eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h'])
-HAS_LANGINFO = check_eci(eci)
-
-if HAS_LANGINFO:
- # list of all possible names
- langinfo_names = [
- "RADIXCHAR", "THOUSEP", "CRNCYSTR",
- "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR",
- "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT",
- "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT",
- ]
- for i in range(1, 8):
- langinfo_names.append("DAY_%d" % i)
- langinfo_names.append("ABDAY_%d" % i)
- for i in range(1, 13):
- langinfo_names.append("MON_%d" % i)
- langinfo_names.append("ABMON_%d" % i)
-
- class LanginfoConfigure:
- _compilation_info_ = eci
- nl_item = SimpleType('nl_item')
- for key in langinfo_names:
- setattr(LanginfoConfigure, key, DefinedConstantInteger(key))
-
- langinfo_config = configure(LanginfoConfigure)
- for key, value in langinfo_config.items():
- if value is None:
- del langinfo_config[key]
- langinfo_names.remove(key)
- config.update(langinfo_config)
- _CONSTANTS += langinfo_names
-
-# ____________________________________________________________
-
-config['ALL_CONSTANTS'] = tuple(_CONSTANTS)
-config['HAS_LANGINFO'] = HAS_LANGINFO
-dumpcache.dumpcache2('locale', config)
diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py
deleted file mode 100755
--- a/lib_pypy/ctypes_config_cache/rebuild.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env python
-# Run this script to rebuild all caches from the *.ctc.py files.
-
-import os, sys
-
-sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))
-
-import py
-
-_dirpath = os.path.dirname(__file__) or os.curdir
-
-from rpython.tool.ansi_print import AnsiLogger
-log = AnsiLogger("ctypes_config_cache")
-
-
-def rebuild_one(name):
- filename = os.path.join(_dirpath, name)
- d = {'__file__': filename}
- path = sys.path[:]
- try:
- sys.path.insert(0, _dirpath)
- execfile(filename, d)
- finally:
- sys.path[:] = path
-
-def try_rebuild():
- size = 32 if sys.maxint <= 2**32 else 64
- # remove the files '_*_size_.py'
- left = {}
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_%s_.py' % size) or
- p.endswith('_%s_.pyc' % size)):
- os.unlink(os.path.join(_dirpath, p))
- elif p.startswith('_') and (p.endswith('_.py') or
- p.endswith('_.pyc')):
- for i in range(2, len(p)-4):
- left[p[:i]] = True
- # remove the files '_*_cache.py' if there is no '_*_*_.py' left around
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_cache.py') or
- p.endswith('_cache.pyc')):
- if p[:-9] not in left:
- os.unlink(os.path.join(_dirpath, p))
- #
- for p in os.listdir(_dirpath):
- if p.endswith('.ctc.py'):
- try:
- rebuild_one(p)
- except Exception, e:
- log.ERROR("Running %s:\n %s: %s" % (
- os.path.join(_dirpath, p),
- e.__class__.__name__, e))
-
-
-if __name__ == '__main__':
- try_rebuild()
diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/resource.ctc.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-'ctypes_configure' source for resource.py.
-Run this to rebuild _resource_cache.py.
-"""
-
-
-from ctypes import sizeof
-import dumpcache
-from ctypes_configure.configure import (configure,
- ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger,
- SimpleType)
-
-
-_CONSTANTS = (
- 'RLIM_INFINITY',
- 'RLIM_NLIMITS',
-)
-_OPTIONAL_CONSTANTS = (
- 'RLIMIT_CPU',
- 'RLIMIT_FSIZE',
- 'RLIMIT_DATA',
- 'RLIMIT_STACK',
- 'RLIMIT_CORE',
- 'RLIMIT_RSS',
- 'RLIMIT_NPROC',
- 'RLIMIT_NOFILE',
- 'RLIMIT_OFILE',
- 'RLIMIT_MEMLOCK',
- 'RLIMIT_AS',
- 'RLIMIT_LOCKS',
- 'RLIMIT_SIGPENDING',
- 'RLIMIT_MSGQUEUE',
- 'RLIMIT_NICE',
- 'RLIMIT_RTPRIO',
- 'RLIMIT_VMEM',
-
- 'RUSAGE_BOTH',
- 'RUSAGE_SELF',
- 'RUSAGE_CHILDREN',
-)
-
-# Setup our configure
-class ResourceConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h'])
- rlim_t = SimpleType('rlim_t')
-for key in _CONSTANTS:
- setattr(ResourceConfigure, key, ConstantInteger(key))
-for key in _OPTIONAL_CONSTANTS:
- setattr(ResourceConfigure, key, DefinedConstantInteger(key))
-
-# Configure constants and types
-config = configure(ResourceConfigure)
-config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1
-optional_constants = []
-for key in _OPTIONAL_CONSTANTS:
- if config[key] is not None:
- optional_constants.append(key)
- else:
- del config[key]
-
-config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants)
-dumpcache.dumpcache2('resource', config)
diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py
--- a/lib_pypy/pwd.py
+++ b/lib_pypy/pwd.py
@@ -1,4 +1,4 @@
-# ctypes implementation: Victor Stinner, 2008-05-08
+# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08
"""
This module provides access to the Unix password database.
It is available on all Unix versions.
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -1,15 +1,8 @@
-import sys
-if sys.platform == 'win32':
- raise ImportError('resource module not available for win32')
+"""http://docs.python.org/library/resource"""
-# load the platform-specific cache made by running resource.ctc.py
-from ctypes_config_cache._resource_cache import *
-
-from ctypes_support import standard_c_lib as libc
-from ctypes_support import get_errno
-from ctypes import Structure, c_int, c_long, byref, POINTER
+from _resource_cffi import ffi, lib
from errno import EINVAL, EPERM
-import _structseq
+import _structseq, os
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
@@ -18,106 +11,37 @@
class error(Exception):
pass
+class struct_rusage:
+ """struct_rusage: Result from getrusage.
-# Read required libc functions
-_getrusage = libc.getrusage
-_getrlimit = libc.getrlimit
-_setrlimit = libc.setrlimit
-try:
- _getpagesize = libc.getpagesize
- _getpagesize.argtypes = ()
- _getpagesize.restype = c_int
-except AttributeError:
- from os import sysconf
- _getpagesize = None
+This object may be accessed either as a tuple of
+ (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt,
+ nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw)
+or via the attributes ru_utime, ru_stime, ru_maxrss, and so on."""
-
-class timeval(Structure):
- _fields_ = (
- ("tv_sec", c_long),
- ("tv_usec", c_long),
- )
- def __str__(self):
- return "(%s, %s)" % (self.tv_sec, self.tv_usec)
-
- def __float__(self):
- return self.tv_sec + self.tv_usec/1000000.0
-
-class _struct_rusage(Structure):
- _fields_ = (
- ("ru_utime", timeval),
- ("ru_stime", timeval),
- ("ru_maxrss", c_long),
- ("ru_ixrss", c_long),
- ("ru_idrss", c_long),
- ("ru_isrss", c_long),
- ("ru_minflt", c_long),
- ("ru_majflt", c_long),
- ("ru_nswap", c_long),
- ("ru_inblock", c_long),
- ("ru_oublock", c_long),
- ("ru_msgsnd", c_long),
- ("ru_msgrcv", c_long),
- ("ru_nsignals", c_long),
- ("ru_nvcsw", c_long),
- ("ru_nivcsw", c_long),
- )
-
-_getrusage.argtypes = (c_int, POINTER(_struct_rusage))
-_getrusage.restype = c_int
-
-
-class struct_rusage:
__metaclass__ = _structseq.structseqtype
- ru_utime = _structseq.structseqfield(0)
- ru_stime = _structseq.structseqfield(1)
- ru_maxrss = _structseq.structseqfield(2)
- ru_ixrss = _structseq.structseqfield(3)
- ru_idrss = _structseq.structseqfield(4)
- ru_isrss = _structseq.structseqfield(5)
- ru_minflt = _structseq.structseqfield(6)
- ru_majflt = _structseq.structseqfield(7)
- ru_nswap = _structseq.structseqfield(8)
- ru_inblock = _structseq.structseqfield(9)
- ru_oublock = _structseq.structseqfield(10)
- ru_msgsnd = _structseq.structseqfield(11)
- ru_msgrcv = _structseq.structseqfield(12)
- ru_nsignals = _structseq.structseqfield(13)
- ru_nvcsw = _structseq.structseqfield(14)
- ru_nivcsw = _structseq.structseqfield(15)
+ ru_utime = _structseq.structseqfield(0, "user time used")
+ ru_stime = _structseq.structseqfield(1, "system time used")
+ ru_maxrss = _structseq.structseqfield(2, "max. resident set size")
+ ru_ixrss = _structseq.structseqfield(3, "shared memory size")
+ ru_idrss = _structseq.structseqfield(4, "unshared data size")
+ ru_isrss = _structseq.structseqfield(5, "unshared stack size")
+ ru_minflt = _structseq.structseqfield(6, "page faults not requiring I/O")
+ ru_majflt = _structseq.structseqfield(7, "page faults requiring I/O")
+ ru_nswap = _structseq.structseqfield(8, "number of swap outs")
+ ru_inblock = _structseq.structseqfield(9, "block input operations")
+ ru_oublock = _structseq.structseqfield(10, "block output operations")
+ ru_msgsnd = _structseq.structseqfield(11, "IPC messages sent")
+ ru_msgrcv = _structseq.structseqfield(12, "IPC messages received")
+ ru_nsignals = _structseq.structseqfield(13,"signals received")
+ ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches")
+ ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches")
-@builtinify
-def rlimit_check_bounds(rlim_cur, rlim_max):
- if rlim_cur > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_cur)
- if rlim_max > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_max)
-
-class rlimit(Structure):
- _fields_ = (
- ("rlim_cur", rlim_t),
- ("rlim_max", rlim_t),
- )
-
-_getrlimit.argtypes = (c_int, POINTER(rlimit))
-_getrlimit.restype = c_int
-_setrlimit.argtypes = (c_int, POINTER(rlimit))
-_setrlimit.restype = c_int
-
-
-@builtinify
-def getrusage(who):
- ru = _struct_rusage()
- ret = _getrusage(who, byref(ru))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- raise ValueError("invalid who parameter")
- raise error(errno)
+def _make_struct_rusage(ru):
return struct_rusage((
- float(ru.ru_utime),
- float(ru.ru_stime),
+ lib.my_utime(ru),
+ lib.my_stime(ru),
ru.ru_maxrss,
ru.ru_ixrss,
ru.ru_idrss,
@@ -135,48 +59,59 @@
))
@builtinify
+def getrusage(who):
+ ru = ffi.new("struct rusage *")
+ if lib.getrusage(who, ru) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("invalid who parameter")
+ raise error(ffi.errno)
+ return _make_struct_rusage(ru)
+
+@builtinify
def getrlimit(resource):
- if not(0 <= resource < RLIM_NLIMITS):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlim = rlimit()
- ret = _getrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- raise error(errno)
- return (rlim.rlim_cur, rlim.rlim_max)
+ result = ffi.new("long long[2]")
+ if lib.my_getrlimit(resource, result) == -1:
+ raise error(ffi.errno)
+ return (result[0], result[1])
@builtinify
-def setrlimit(resource, rlim):
- if not(0 <= resource < RLIM_NLIMITS):
+def setrlimit(resource, limits):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlimit_check_bounds(*rlim)
- rlim = rlimit(rlim[0], rlim[1])
- ret = _setrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- return ValueError("current limit exceeds maximum limit")
- elif errno == EPERM:
- return ValueError("not allowed to raise maximum limit")
+ limits = tuple(limits)
+ if len(limits) != 2:
+ raise ValueError("expected a tuple of 2 integers")
+
+ if lib.my_setrlimit(resource, limits[0], limits[1]) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("current limit exceeds maximum limit")
+ elif ffi.errno == EPERM:
+ raise ValueError("not allowed to raise maximum limit")
else:
- raise error(errno)
+ raise error(ffi.errno)
+
@builtinify
def getpagesize():
- if _getpagesize:
- return _getpagesize()
- else:
- try:
- return sysconf("SC_PAGE_SIZE")
- except ValueError:
- # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE
- return sysconf("SC_PAGESIZE")
+ return os.sysconf("SC_PAGESIZE")
-__all__ = ALL_CONSTANTS + (
- 'error', 'timeval', 'struct_rusage', 'rlimit',
- 'getrusage', 'getrlimit', 'setrlimit', 'getpagesize',
+
+def _setup():
+ all_constants = []
+ p = lib.my_rlimit_consts
+ while p.name:
+ name = ffi.string(p.name)
+ globals()[name] = int(p.value)
+ all_constants.append(name)
+ p += 1
+ return all_constants
+
+__all__ = tuple(_setup()) + (
+ 'error', 'getpagesize', 'struct_rusage',
+ 'getrusage', 'getrlimit', 'setrlimit',
)
-
-del ALL_CONSTANTS
+del _setup
diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py
--- a/lib_pypy/syslog.py
+++ b/lib_pypy/syslog.py
@@ -51,6 +51,8 @@
# if log is not opened, open it now
if not _S_log_open:
openlog()
+ if isinstance(message, unicode):
+ message = str(message)
lib.syslog(priority, "%s", message)
@builtinify
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -204,15 +204,6 @@
BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
default=False),
- BoolOption("withprebuiltchar",
- "use prebuilt single-character string objects",
- default=False),
-
- BoolOption("sharesmallstr",
- "always reuse the prebuilt string objects "
- "(the empty string and potentially single-char strings)",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
@@ -222,39 +213,14 @@
default=False,
requires=[("objspace.honor__builtins__", False)]),
- BoolOption("withmapdict",
- "make instances really small but slow without the JIT",
- default=False,
- requires=[("objspace.std.getattributeshortcut", True),
- ("objspace.std.withtypeversion", True),
- ]),
-
- BoolOption("withrangelist",
- "enable special range list implementation that does not "
- "actually create the full list until the resulting "
- "list is mutated",
- default=False),
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
- BoolOption("withtypeversion",
- "version type objects when changing them",
- cmdline=None,
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
-
- BoolOption("withmethodcache",
- "try to cache method lookups",
- default=False,
- requires=[("objspace.std.withtypeversion", True),
- ("translation.rweakref", True)]),
BoolOption("withmethodcachecounter",
"try to cache methods and provide a counter in __pypy__. "
"for testing purposes only.",
- default=False,
- requires=[("objspace.std.withmethodcache", True)]),
+ default=False),
IntOption("methodcachesizeexp",
" 2 ** methodcachesizeexp is the size of the of the method cache ",
default=11),
@@ -265,22 +231,10 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("getattributeshortcut",
- "track types that override __getattribute__",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
BoolOption("newshortcut",
"cache and shortcut calling __new__ from builtin types",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
+ default=False),
- BoolOption("withidentitydict",
- "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
]),
])
@@ -296,15 +250,10 @@
"""
# all the good optimizations for PyPy should be listed here
if level in ['2', '3', 'jit']:
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withmethodcache=True)
- config.objspace.std.suggest(withprebuiltchar=True)
config.objspace.std.suggest(intshortcut=True)
config.objspace.std.suggest(optimized_list_getitem=True)
- config.objspace.std.suggest(getattributeshortcut=True)
#config.objspace.std.suggest(newshortcut=True)
config.objspace.std.suggest(withspecialisedtuple=True)
- config.objspace.std.suggest(withidentitydict=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
@@ -317,16 +266,13 @@
# memory-saving optimizations
if level == 'mem':
config.objspace.std.suggest(withprebuiltint=True)
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withprebuiltchar=True)
- config.objspace.std.suggest(withmapdict=True)
+ config.objspace.std.suggest(withliststrategies=True)
if not IS_64_BITS:
config.objspace.std.suggest(withsmalllong=True)
# extra optimizations with the JIT
if level == 'jit':
config.objspace.std.suggest(withcelldict=True)
- config.objspace.std.suggest(withmapdict=True)
def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
assert conf.objspace.usemodules.gc
- conf.objspace.std.withmapdict = True
- assert conf.objspace.std.withtypeversion
- conf = get_pypy_config()
- conf.objspace.std.withtypeversion = False
- py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
def test_conflicting_gcrootfinder():
conf = get_pypy_config()
conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
def test_set_pypy_opt_level():
conf = get_pypy_config()
set_pypy_opt_level(conf, '2')
- assert conf.objspace.std.getattributeshortcut
+ assert conf.objspace.std.intshortcut
conf = get_pypy_config()
set_pypy_opt_level(conf, '0')
- assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
- conf = get_pypy_config()
- conf.translation.rweakref = False
- set_pypy_opt_level(conf, '3')
-
- assert not conf.objspace.std.withtypeversion
- assert not conf.objspace.std.withmethodcache
+ assert not conf.objspace.std.intshortcut
def test_check_documentation():
def check_file_exists(fn):
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -102,15 +102,15 @@
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
- tk-dev
+ tk-dev libgc-dev
For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
On Fedora::
- yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
- lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
- (XXX plus the Febora version of libgdbm-dev and tk-dev)
+ dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+ gdbm-devel
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withrangelist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Enable "range list" objects. They are an additional implementation of the Python
-``list`` type, indistinguishable for the normal user. Whenever the ``range``
-builtin is called, an range list is returned. As long as this list is not
-mutated (and for example only iterated over), it uses only enough memory to
-store the start, stop and step of the range. This makes using ``range`` as
-efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
-
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
The work on the cling backend has so far been done only for CPython, but
bringing it to PyPy is a lot less work than developing it in the first place.
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
.. _llvm: http://llvm.org/
.. _clang: http://clang.llvm.org/
@@ -283,7 +283,8 @@
core reflection set, but for the moment assume we want to have it in the
reflection library that we are building for this example.
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries"),
which is a simple XML file specifying, either explicitly or by using a
pattern, which classes, variables, namespaces, etc. to select from the given
header file.
@@ -305,7 +306,7 @@
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
Now the reflection info can be generated and compiled::
@@ -811,7 +812,7 @@
immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
variable.
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
There are a couple of minor differences between PyCintex and cppyy, most to do
with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts `
+:source:`pypy/goal/` our main PyPy-translation scripts
live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,123 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``. To
+make it possible, the RPython interface is now the following one (from
+May 2016):
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``. These are called
+ immediately by the GC when the last reference to the object goes
+ away, like in CPython. However, the long-term goal is that all
+ ``__del__()`` methods should only contain simple enough code. If
+ they do, we call them "destructors". They can't use operations that
+ would resurrect the object, for example. Use the decorator
+ ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` that are not passing the destructor test
+ are supported for backward compatibility, but deprecated. The rest
+ of this document assumes that ``__del__()`` are all destructors.
+
+* For any more advanced usage --- in particular for any app-level
+ object with a __del__ --- we don't use the RPython-level
+ ``__del__()`` method. Instead we use
+ ``rgc.FinalizerQueue.register_finalizer()``. This allows us to
+ attach a finalizer method to the object, giving more control over
+ the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer. A finalizer runs earlier, and in topological
+order; care must be taken that the object might still be reachable at
+this point if we're clever enough. A destructor on the other hand runs
+last; nothing can be done with the object any more.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when there is no more reference to an object. Intended for
+objects that just need to free a block of raw memory or close a file.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it. These restrictions are
+checked. In particular you cannot access fields containing GC objects;
+and if you call an external C function, it must be a "safe" function
+(e.g. not releasing the GIL; use ``releasegil=False`` in
+``rffi.llexternal()``).
+
+If there are several objects with destructors that die during the same
+GC cycle, they are called in a completely random order --- but that
+should not matter because destructors cannot do much anyway.
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerQueue`` class:
+
+* You must give a class-level attribute ``base_class``, which is the
+ base class of all instances with a finalizer. (If you need
+ finalizers on several unrelated classes, you need several unrelated
+ ``FinalizerQueue`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``. At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer. Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and mark them as
+reachable again, as well as all objects they depend on. It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance. Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes). If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly. It
+returns the next queued item, or ``None`` when the queue is empty.
+
+It is allowed in theory to cumulate several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues. This is not tested though.
+
+
+Ordering of finalizers
+----------------------
+
+After a collection, the MiniMark GC should call the finalizers on
*some* of the objects that have one and that have become unreachable.
Basically, if there is a reference chain from an object a to an object b
then it should not call the finalizer for b immediately, but just keep b
alive and try again to call its finalizer after the next collection.
-This basic idea fails when there are cycles. It's not a good idea to
+(Note that this creates rare but annoying issues as soon as the program
+creates chains of objects with finalizers more quickly than the rate at
+which major collections go (which is very slow). In August 2013 we tried
+instead to call all finalizers of all objects found unreachable at a major
+collection. That branch, ``gc-del``, was never merged. It is still
+unclear what the real consequences would be on programs in the wild.)
+
+The basic idea fails in the presence of cycles. It's not a good idea to
keep the objects alive forever or to never call any of the finalizers.
The model we came up with is that in this case, we could just call the
finalizer of one of the objects in the cycle -- but only, of course, if
@@ -33,6 +137,7 @@
detach the finalizer (so that it's not called more than once)
call the finalizer
+
Algorithm
---------
@@ -136,28 +241,8 @@
that doesn't change the state of an object, we don't follow its children
recursively.
-In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode
-the 4 states with a single extra bit in the header:
-
- ===== ============= ======== ====================
- state is_forwarded? bit set? bit set in the copy?
- ===== ============= ======== ====================
- 0 no no n/a
- 1 no yes n/a
- 2 yes yes yes
- 3 yes whatever no
- ===== ============= ======== ====================
-
-So the loop above that does the transition from state 1 to state 2 is
-really just a copy(x) followed by scan_copied(). We must also clear the
-bit in the copy at the end, to clean up before the next collection
-(which means recursively bumping the state from 2 to 3 in the final
-loop).
-
-In the MiniMark GC, the objects don't move (apart from when they are
-copied out of the nursery), but we use the flag GCFLAG_VISITED to mark
-objects that survive, so we can also have a single extra bit for
-finalizers:
+In practice, in the MiniMark GC, we can encode
+the 4 states with a combination of two bits in the header:
===== ============== ============================
state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING
@@ -167,3 +252,8 @@
2 yes yes
3 yes no
===== ============== ============================
+
+So the loop above that does the transition from state 1 to state 2 is
+really just a recursive visit. We must also clear the
+FINALIZATION_ORDERING bit at the end (state 2 to state 3) to clean up
+before the next collection.
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
discussion/improve-rpython
discussion/ctypes-implementation
discussion/jit-profiler
+ discussion/rawrefcount
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -79,7 +79,7 @@
:doc:`Full details ` are `available here `.
.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
RPython Mixed Modules
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,20 +106,33 @@
For information on which third party extensions work (or do not work)
with PyPy see the `compatibility wiki`_.
+For more information about how we manage refcounting semantics see
+rawrefcount_.
+
.. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
.. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html
On which platforms does PyPy run?
---------------------------------
-PyPy is regularly and extensively tested on Linux machines. It mostly
+PyPy currently supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+PyPy is regularly and extensively tested on Linux machines. It
works on Mac and Windows: it is tested there, but most of us are running
-Linux so fixes may depend on 3rd-party contributions. PyPy's JIT
-works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7).
-Support for POWER (64-bit) is stalled at the moment.
+Linux so fixes may depend on 3rd-party contributions.
-To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or
+To bootstrap from sources, PyPy can use either CPython 2.7 or
another (e.g. older) PyPy. Cross-translation is not really supported:
e.g. to build a 32-bit PyPy, you need to have a 32-bit environment.
Cross-translation is only explicitly supported between a 32-bit Intel
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.1.1.rst
release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
Dictionary Optimizations
~~~~~~~~~~~~~~~~~~~~~~~~
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
-Multi-dicts are a special implementation of dictionaries. It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime. Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that make it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled, by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
-This is now the default implementation of dictionaries in the Python interpreter.
+Identity Dicts
++++++++++++++++
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with
+new-style classes.
+
+
+Map Dicts
+++++++++++++
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually memory behaviour
+should be mostly like that of using ``__slots__``).
The idea is the following: Most instances of the same class have very similar
attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
dicts:
the representation of the instance dict contains only a list of values.
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
List Optimizations
@@ -114,8 +120,8 @@
created. This gives the memory and speed behaviour of ``xrange`` and the generality
of use of ``range``, and makes ``xrange`` essentially useless.
-You can enable this feature with the :config:`objspace.std.withrangelist`
-option.
+This feature is enabled by default as part of the
+:config:`objspace.std.withliststrategies` option.
User Class Optimizations
@@ -133,8 +139,7 @@
base classes is changed). On subsequent lookups the cached version can be used,
as long as the instance did not shadow any of its classes attributes.
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
Interpreter Optimizations
diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst
--- a/pypy/doc/introduction.rst
+++ b/pypy/doc/introduction.rst
@@ -1,16 +1,22 @@
What is PyPy?
=============
-In common parlance, PyPy has been used to mean two things. The first is the
-:ref:`RPython translation toolchain `, which is a framework for generating
-dynamic programming language implementations. And the second is one
-particular implementation that is so generated --
-an implementation of the Python_ programming language written in
-Python itself. It is designed to be flexible and easy to experiment with.
+Historically, PyPy has been used to mean two things. The first is the
+:ref:`RPython translation toolchain ` for generating
+interpreters for dynamic programming languages. And the second is one
+particular implementation of Python_ produced with it. Because RPython
+uses the same syntax as Python, this generated version became known as
+a Python interpreter written in Python. It is designed to be flexible and
+easy to experiment with.
-This double usage has proven to be confusing, and we are trying to move
-away from using the word PyPy to mean both things. From now on we will
-try to use PyPy to only mean the Python implementation, and say the
+To make it more clear, we start with source code written in RPython,
+apply the RPython translation toolchain, and end up with PyPy as a
+binary executable. This executable is the Python interpreter.
+
+Double usage has proven to be confusing, so we've moved away from using
+the word PyPy to mean both toolchain and generated interpreter. Now we
+use the word PyPy to refer to the Python implementation, and explicitly
+mention
:ref:`RPython translation toolchain ` when we mean the framework.
Some older documents, presentations, papers and videos will still have the old
diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst
--- a/pypy/doc/release-5.1.0.rst
+++ b/pypy/doc/release-5.1.0.rst
@@ -3,10 +3,17 @@
========
We have released PyPy 5.1, about a month after PyPy 5.0.
-We encourage all users of PyPy to update to this version. Apart from the usual
-bug fixes, there is an ongoing effort to improve the warmup time and memory
-usage of JIT-related metadata, and we now fully support the IBM s390x
-architecture.
+
+This release includes more improvement to warmup time and memory
+requirements. We have seen about a 20% memory requirement reduction and up to
+30% warmup time improvement, more detail in the `blog post`_.
+
+We also now have `fully support for the IBM s390x`_. Since this support is in
+`RPython`_, any dynamic language written using RPython, like PyPy, will
+automagically be supported on that architecture.
+
+We updated cffi_ to 1.6, and continue to improve support for the wider
+python ecosystem using the PyPy interpreter.
You can download the PyPy 5.1 release here:
@@ -26,6 +33,9 @@
.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
.. _`numpy`: https://bitbucket.org/pypy/numpy
+.. _cffi: https://cffi.readthedocs.org
+.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html
+.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html
What is PyPy?
=============
@@ -46,7 +56,7 @@
* big- and little-endian variants of **PPC64** running Linux,
- * **s960x** running Linux
+ * **s390x** running Linux
.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
.. _`dynamic languages`: http://pypyjs.org
@@ -74,6 +84,8 @@
* Fix a corner case in the JIT
* Fix edge cases in the cpyext refcounting-compatible semantics
+ (more work on cpyext compatibility is coming in the ``cpyext-ext``
+ branch, but isn't ready yet)
* Try harder to not emit NEON instructions on ARM processors without NEON
support
@@ -92,11 +104,17 @@
* Fix sandbox startup (a regression in 5.0)
+ * Fix possible segfault for classes with mangled mro or __metaclass__
+
+ * Fix isinstance(deque(), Hashable) on the pure python deque
+
+ * Fix an issue with forkpty()
+
* Issues reported with our previous release were resolved_ after reports from users on
our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
#pypy
-* Numpy:
+* Numpy_:
* Implemented numpy.where for a single argument
@@ -108,6 +126,8 @@
functions exported from libpypy.so are declared in pypy_numpy.h, which is
included only when building our fork of numpy
+ * Add broadcast
+
* Performance improvements:
* Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting
@@ -119,14 +139,18 @@
* Remove the forced minor collection that occurs when rewriting the
assembler at the start of the JIT backend
+ * Port the resource module to cffi
+
* Internal refactorings:
* Use a simpler logger to speed up translation
* Drop vestiges of Python 2.5 support in testing
+ * Update rpython functions with ones needed for py3k
+
.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html
-.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html
+.. _Numpy: https://bitbucket.org/pypy/numpy
Please update, and continue to help us make PyPy better.
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released a bugfix for PyPy 5.1, due to a regression_ in
+installing third-party packages dependent on numpy (using our numpy fork
+available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy which increased the memory required to translate. Improvement
+will be noticed by downstream packagers and those who translate rather than
+download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
help="output format")
options, args = parser.parse_args()
if len(args) != 1:
- raise ValueError, "need exactly one argument"
+ raise ValueError("need exactly one argument")
epsfile = process_dot(py.path.local(args[0]))
if options.format == "ps" or options.format == "eps":
print epsfile.read()
diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst
--- a/pypy/doc/whatsnew-5.1.0.rst
+++ b/pypy/doc/whatsnew-5.1.0.rst
@@ -60,3 +60,13 @@
Remove old unneeded numpy headers, what is left is only for testing. Also
generate pypy_numpy.h which exposes functions to directly use micronumpy
ndarray and ufuncs
+
+.. branch: rposix-for-3
+
+Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
+This updates the underlying rpython functions with the ones needed for the
+py3k branch
+
+.. branch: numpy_broadcast
+
+Add broadcast to micronumpy
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,14 +3,84 @@
=========================
.. this is a revision shortly after release-5.1
-.. startrev: 2180e1eaf6f6
+.. startrev: aa60332382a1
-.. branch: rposix-for-3
+.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046
-Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
-This updates the underlying rpython functions with the ones needed for the
-py3k branch
-
-.. branch: numpy_broadcast
From pypy.commits at gmail.com Mon May 9 05:44:57 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 09 May 2016 02:44:57 -0700 (PDT)
Subject: [pypy-commit] pypy new-jit-log: moved the debug counter in its own
file (debug.py). this was necessary to get a handle to the loop counters
when calling jitlog_disable
Message-ID: <57305c19.4d571c0a.25fa4.ffffc3f0@mx.google.com>
Author: Richard Plangger
Branch: new-jit-log
Changeset: r84330:5f0da389d027
Date: 2016-05-09 11:44 +0200
http://bitbucket.org/pypy/pypy/changeset/5f0da389d027/
Log: moved the debug counter in its own file (debug.py). this was
necessary to get a handle to the loop counters when calling
jitlog_disable
diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py
--- a/rpython/jit/backend/arm/assembler.py
+++ b/rpython/jit/backend/arm/assembler.py
@@ -14,7 +14,7 @@
CoreRegisterManager, check_imm_arg, VFPRegisterManager,
operations as regalloc_operations)
from rpython.jit.backend.llsupport import jitframe, rewrite
-from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, BaseAssembler
+from rpython.jit.backend.llsupport.assembler import BaseAssembler
from rpython.jit.backend.llsupport.regalloc import get_scale, valid_addressing_size
from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
from rpython.jit.backend.model import CompiledLoopToken
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -13,13 +13,8 @@
from rpython.rtyper.annlowlevel import cast_instance_to_gcref, llhelper
from rpython.rtyper.lltypesystem import rffi, lltype
-
-DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER',
- # 'b'ridge, 'l'abel or # 'e'ntry point
- ('i', lltype.Signed), # first field, at offset 0
- ('type', lltype.Char),
- ('number', lltype.Signed)
-)
+from rpython.jit.metainterp.debug import (DEBUG_COUNTER, LOOP_RUN_COUNTERS,
+ flush_debug_counters)
class GuardToken(object):
def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs,
@@ -362,10 +357,6 @@
ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr]))
def _register_counter(self, tp, number, token):
- # YYY very minor leak -- we need the counters to stay alive
- # forever, just because we want to report them at the end
- # of the process
-
# XXX the numbers here are ALMOST unique, but not quite, use a counter
# or something
struct = lltype.malloc(DEBUG_COUNTER, flavor='raw',
@@ -377,14 +368,15 @@
else:
assert token
struct.number = compute_unique_id(token)
- self.loop_run_counters.append(struct)
+ LOOP_RUN_COUNTERS.append(struct)
return struct
def finish_once(self, jitlog):
if self._debug:
+ # TODO remove the old logging system when jitlog is complete
debug_start('jit-backend-counts')
- for i in range(len(self.loop_run_counters)):
- struct = self.loop_run_counters[i]
+ for i in range(len(LOOP_RUN_COUNTERS)):
+ struct = LOOP_RUN_COUNTERS[i]
if struct.type == 'l':
prefix = 'TargetToken(%d)' % struct.number
else:
@@ -401,9 +393,7 @@
debug_stop('jit-backend-counts')
if jitlog:
- # this is always called, the jitlog knows if it is enabled
- for i, struct in enumerate(self.loop_run_counters):
- jitlog.log_jit_counter(struct)
+ flush_debug_counters(jitlog)
@staticmethod
@rgc.no_collect
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -3,8 +3,7 @@
import py
from rpython.jit.backend.llsupport import symbolic, jitframe, rewrite
-from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler,
- DEBUG_COUNTER, debug_bridge)
+from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, debug_bridge)
from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
from rpython.jit.metainterp.history import (Const, VOID, ConstInt)
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -1760,7 +1760,7 @@
self.cpu = cpu
self.stats = self.cpu.stats
self.options = options
- self.jitlog = jl.VMProfJitLogger()
+ self.jitlog = jl.VMProfJitLogger(self.cpu)
self.logger_noopt = Logger(self)
self.logger_ops = Logger(self, guard_number=True)
diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py
--- a/rpython/rlib/jitlog.py
+++ b/rpython/rlib/jitlog.py
@@ -222,8 +222,19 @@
content.append(encode_str(opname.lower()))
return ''.join(content)
+
+def _log_jit_counter(cintf, struct):
+ if not cintf.jitlog_enabled():
+ return
+ le_addr = encode_le_addr(struct.number)
+ # not an address (but a number) but it is a machine word
+ le_count = encode_le_addr(struct.i)
+ out = le_addr + le_count
+ cintf.jitlog_write_marked(MARK_JITLOG_COUNTER, out, len(out))
+
class VMProfJitLogger(object):
- def __init__(self):
+ def __init__(self, cpu=None):
+ self.cpu = cpu
self.cintf = cintf.setup()
self.memo = {}
self.trace_id = -1
@@ -265,12 +276,7 @@
self.cintf.jitlog_write_marked(mark, line, len(line))
def log_jit_counter(self, struct):
- if not self.cintf.jitlog_enabled():
- return
- le_addr = encode_le_addr(struct.number)
- # not an address (but a number) but it is a machine word
- le_count = encode_le_addr(struct.i)
- self._write_marked(MARK_JITLOG_COUNTER, le_addr + le_count)
+ _log_jit_counter(self.cintf, struct)
def log_trace(self, tag, metainterp_sd, mc, memo=None):
if not self.cintf.jitlog_enabled():
@@ -482,7 +488,7 @@
def copy_core_dump(self, addr, offset=0, count=-1):
dump = []
src = rffi.cast(rffi.CCHARP, addr)
- end = self.get_relative_pos()
+ end = self.mc.get_relative_pos()
if count != -1:
end = offset + count
for p in range(offset, end):
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -132,6 +132,8 @@
self.cintf.jitlog_write_marked(jl.MARK_JITLOG_HEADER, blob, len(blob))
def disable_jitlog(self):
+ from rpython.jit.metainterp.debug import flush_debug_counters
+ flush_debug_counters(self.cintf)
self.cintf.jitlog_teardown()
def disable(self):
From pypy.commits at gmail.com Mon May 9 05:49:00 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 02:49:00 -0700 (PDT)
Subject: [pypy-commit] pypy default: Extra tests,
making very very sure that new_foo() is called
Message-ID: <57305d0c.0b1f1c0a.fc792.ffffcbe6@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84331:b6f3b01b132c
Date: 2016-05-09 11:40 +0200
http://bitbucket.org/pypy/pypy/changeset/b6f3b01b132c/
Log: Extra tests, making very very sure that new_foo() is called
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -927,13 +927,16 @@
("fetchFooType", "METH_VARARGS",
"""
PyObject *o;
+ Foo_Type.tp_basicsize = sizeof(FooObject);
Foo_Type.tp_dealloc = &dealloc_foo;
- Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
+ Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES
+ | Py_TPFLAGS_BASETYPE;
Foo_Type.tp_new = &new_foo;
Foo_Type.tp_free = &PyObject_Del;
if (PyType_Ready(&Foo_Type) < 0) return NULL;
o = PyObject_New(PyObject, &Foo_Type);
+ init_foo(o);
Py_DECREF(o); /* calls dealloc_foo immediately */
Py_INCREF(&Foo_Type);
@@ -944,14 +947,34 @@
return PyInt_FromLong(foo_counter);
""")], prologue=
"""
+ typedef struct {
+ PyObject_HEAD
+ int someval[99];
+ } FooObject;
static int foo_counter = 1000;
static void dealloc_foo(PyObject *foo) {
+ int i;
foo_counter += 10;
+ for (i = 0; i < 99; i++)
+ if (((FooObject *)foo)->someval[i] != 1000 + i)
+ foo_counter += 100000; /* error! */
+ Py_TYPE(foo)->tp_free(foo);
+ }
+ static void init_foo(PyObject *o)
+ {
+ int i;
+ if (o->ob_type->tp_basicsize < sizeof(FooObject))
+ abort();
+ for (i = 0; i < 99; i++)
+ ((FooObject *)o)->someval[i] = 1000 + i;
}
static PyObject *new_foo(PyTypeObject *t, PyObject *a, PyObject *k)
{
+ PyObject *o;
foo_counter += 1000;
- return t->tp_alloc(t, 0);
+ o = t->tp_alloc(t, 0);
+ init_foo(o);
+ return o;
}
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
From pypy.commits at gmail.com Mon May 9 05:49:02 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 02:49:02 -0700 (PDT)
Subject: [pypy-commit] pypy default: Found the next bug: when you have a
Python subclass of a C API type, and
Message-ID: <57305d0e.a423c20a.f9243.ffffdb73@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84332:adc30cc041ed
Date: 2016-05-09 11:48 +0200
http://bitbucket.org/pypy/pypy/changeset/adc30cc041ed/
Log: Found the next bug: when you have a Python subclass of a C API type,
and when you instantiate this Python subclass using C code (!), then
tp_new is not called
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -942,6 +942,14 @@
Py_INCREF(&Foo_Type);
return (PyObject *)&Foo_Type;
"""),
+ ("newInstance", "METH_O",
+ """
+ PyTypeObject *tp = (PyTypeObject *)args;
+ PyObject *e = PyTuple_New(0);
+ PyObject *o = tp->tp_new(tp, e, NULL);
+ Py_DECREF(e);
+ return o;
+ """),
("getCounter", "METH_VARARGS",
"""
return PyInt_FromLong(foo_counter);
@@ -1000,3 +1008,17 @@
break
self.debug_collect()
assert module.getCounter() == 5050
+ #
+ module.newInstance(Foo)
+ for i in range(10):
+ if module.getCounter() >= 6060:
+ break
+ self.debug_collect()
+ assert module.getCounter() == 6060
+ #
+ module.newInstance(Bar)
+ for i in range(10):
+ if module.getCounter() >= 7070:
+ break
+ self.debug_collect()
+ #assert module.getCounter() == 7070 -- oops, bug!
From pypy.commits at gmail.com Mon May 9 06:33:07 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 03:33:07 -0700 (PDT)
Subject: [pypy-commit] pypy default: Next fix
Message-ID: <57306763.26b0c20a.ef1f4.ffffea41@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84333:e61e2f4a32fa
Date: 2016-05-09 12:33 +0200
http://bitbucket.org/pypy/pypy/changeset/e61e2f4a32fa/
Log: Next fix
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -1002,6 +1002,7 @@
#
class Bar(Foo):
pass
+ assert Foo.__new__ is Bar.__new__
Bar(); Bar()
for i in range(10):
if module.getCounter() >= 5050:
@@ -1021,4 +1022,4 @@
if module.getCounter() >= 7070:
break
self.debug_collect()
- #assert module.getCounter() == 7070 -- oops, bug!
+ assert module.getCounter() == 7070
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -196,6 +196,10 @@
def update_all_slots(space, w_type, pto):
# XXX fill slots in pto
+ # Not very sure about it, but according to
+ # test_call_tp_dealloc_when_created_from_python, we should not
+ # overwrite slots that are already set: these ones are probably
+ # coming from a parent C type.
typedef = w_type.layout.typedef
for method_name, slot_name, slot_names, slot_func in slotdefs_for_tp_slots:
@@ -223,7 +227,8 @@
# XXX special case wrapper-functions and use a "specific" slot func
if len(slot_names) == 1:
- setattr(pto, slot_names[0], slot_func_helper)
+ if not getattr(pto, slot_names[0]):
+ setattr(pto, slot_names[0], slot_func_helper)
else:
assert len(slot_names) == 2
struct = getattr(pto, slot_names[0])
@@ -240,7 +245,8 @@
struct = lltype.malloc(STRUCT_TYPE, flavor='raw', zero=True)
setattr(pto, slot_names[0], struct)
- setattr(struct, slot_names[1], slot_func_helper)
+ if not getattr(struct, slot_names[1]):
+ setattr(struct, slot_names[1], slot_func_helper)
def add_operators(space, dict_w, pto):
# XXX support PyObject_HashNotImplemented
From pypy.commits at gmail.com Mon May 9 06:44:04 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 09 May 2016 03:44:04 -0700 (PDT)
Subject: [pypy-commit] pypy new-jit-log: provided wrong argument to
flush_debug_counters
Message-ID: <573069f4.26b0c20a.ef1f4.ffffef48@mx.google.com>
Author: Richard Plangger
Branch: new-jit-log
Changeset: r84334:84716af5f182
Date: 2016-05-09 11:49 +0200
http://bitbucket.org/pypy/pypy/changeset/84716af5f182/
Log: provided wrong argument to flush_debug_counters
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -393,7 +393,7 @@
debug_stop('jit-backend-counts')
if jitlog:
- flush_debug_counters(jitlog)
+ flush_debug_counters(jitlog.cintf)
@staticmethod
@rgc.no_collect
From pypy.commits at gmail.com Mon May 9 06:44:06 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 09 May 2016 03:44:06 -0700 (PDT)
Subject: [pypy-commit] pypy new-jit-log: forgot file (thx armin)
Message-ID: <573069f6.8344c20a.2d101.fffffb91@mx.google.com>
Author: Richard Plangger
Branch: new-jit-log
Changeset: r84335:f60619823f3b
Date: 2016-05-09 12:43 +0200
http://bitbucket.org/pypy/pypy/changeset/f60619823f3b/
Log: forgot file (thx armin)
diff --git a/rpython/jit/metainterp/debug.py b/rpython/jit/metainterp/debug.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/metainterp/debug.py
@@ -0,0 +1,27 @@
+from rpython.rtyper.lltypesystem import rffi, lltype
+from rpython.rlib.jitlog import _log_jit_counter
+
+# YYY very minor leak -- we need the counters to stay alive
+# forever, just because we want to report them at the end
+# of the process
+
+LOOP_RUN_COUNTERS = []
+
+DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER',
+ # 'b'ridge, 'l'abel or # 'e'ntry point
+ ('i', lltype.Signed), # first field, at offset 0
+ ('type', lltype.Char),
+ ('number', lltype.Signed)
+)
+
+def flush_debug_counters(cintf):
+ # this is always called, the jitlog knows if it is enabled
+ for i in range(len(LOOP_RUN_COUNTERS)):
+ struct = LOOP_RUN_COUNTERS[i]
+ _log_jit_counter(cintf, struct)
+ # reset the counter, flush in a later point in time will
+ # add up the counters!
+ struct.i = 0
+ # here would be the point to free some counters
+ # see YYY comment above! but first we should run this every once in a while
+ # not just when jitlog_disable is called
From pypy.commits at gmail.com Mon May 9 08:04:38 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 09 May 2016 05:04:38 -0700 (PDT)
Subject: [pypy-commit] pypy default: Blind fix for issue #2285: rare vmprof
segfaults on OS/X
Message-ID: <57307cd6.0b1f1c0a.fc792.0b3b@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84336:57e12f1aa41b
Date: 2016-05-09 14:04 +0200
http://bitbucket.org/pypy/pypy/changeset/57e12f1aa41b/
Log: Blind fix for issue #2285: rare vmprof segfaults on OS/X
diff --git a/rpython/rlib/rvmprof/src/vmprof_common.h b/rpython/rlib/rvmprof/src/vmprof_common.h
--- a/rpython/rlib/rvmprof/src/vmprof_common.h
+++ b/rpython/rlib/rvmprof/src/vmprof_common.h
@@ -82,6 +82,10 @@
int n = 0;
intptr_t addr = 0;
int bottom_jitted = 0;
+
+ if (stack == NULL)
+ return 0;
+
// check if the pc is in JIT
#ifdef PYPY_JIT_CODEMAP
if (pypy_find_codemap_at_addr((intptr_t)pc, &addr)) {
@@ -111,7 +115,12 @@
#ifndef RPYTHON_LL2CTYPES
static vmprof_stack_t *get_vmprof_stack(void)
{
- return RPY_THREADLOCALREF_GET(vmprof_tl_stack);
+ struct pypy_threadlocal_s *tl;
+ _OP_THREADLOCALREF_ADDR_SIGHANDLER(tl);
+ if (tl == NULL)
+ return NULL;
+ else
+ return tl->vmprof_tl_stack;
}
#else
static vmprof_stack_t *get_vmprof_stack(void)
diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h
--- a/rpython/translator/c/src/threadlocal.h
+++ b/rpython/translator/c/src/threadlocal.h
@@ -53,6 +53,13 @@
r = _RPython_ThreadLocals_Build(); \
} while (0)
+#define _OP_THREADLOCALREF_ADDR_SIGHANDLER(r) \
+ do { \
+ r = (char *)&pypy_threadlocal; \
+ if (pypy_threadlocal.ready != 42) \
+ r = NULL; \
+ } while (0)
+
#define RPY_THREADLOCALREF_ENSURE() \
if (pypy_threadlocal.ready != 42) \
(void)_RPython_ThreadLocals_Build();
@@ -87,6 +94,11 @@
r = _RPython_ThreadLocals_Build(); \
} while (0)
+#define _OP_THREADLOCALREF_ADDR_SIGHANDLER(r) \
+ do { \
+ r = (char *)_RPy_ThreadLocals_Get(); \
+ } while (0)
+
#define RPY_THREADLOCALREF_ENSURE() \
if (!_RPy_ThreadLocals_Get()) \
(void)_RPython_ThreadLocals_Build();
From pypy.commits at gmail.com Mon May 9 12:04:37 2016
From: pypy.commits at gmail.com (william_ml_leslie)
Date: Mon, 09 May 2016 09:04:37 -0700 (PDT)
Subject: [pypy-commit] pypy taskengine-sorted-optionals: Optional
dependencies should take part in ordering even if they become non-optional
'later'
Message-ID: <5730b515.49961c0a.938e1.656c@mx.google.com>
Author: William ML Leslie
Branch: taskengine-sorted-optionals
Changeset: r84337:cb82010dadbc
Date: 2016-05-10 02:02 +1000
http://bitbucket.org/pypy/pypy/changeset/cb82010dadbc/
Log: Optional dependencies should take part in ordering even if they
become non-optional 'later'
diff --git a/rpython/translator/tool/taskengine.py b/rpython/translator/tool/taskengine.py
--- a/rpython/translator/tool/taskengine.py
+++ b/rpython/translator/tool/taskengine.py
@@ -13,7 +13,7 @@
tasks[task_name] = task, task_deps
- def _plan(self, goals, skip=[]):
+ def _plan(self, goals, skip=()):
skip = [toskip for toskip in skip if toskip not in goals]
key = (tuple(goals), tuple(skip))
@@ -21,64 +21,46 @@
return self._plan_cache[key]
except KeyError:
pass
- constraints = []
-
- def subgoals(task_name):
- taskcallable, deps = self.tasks[task_name]
- for dep in deps:
- if dep.startswith('??'): # optional
- dep = dep[2:]
- if dep not in goals:
- continue
- if dep.startswith('?'): # suggested
- dep = dep[1:]
- if dep in skip:
- continue
- yield dep
-
- seen = {}
-
- def consider(subgoal):
- if subgoal in seen:
- return
- else:
- seen[subgoal] = True
- constraints.append([subgoal])
- deps = subgoals(subgoal)
- for dep in deps:
- constraints.append([subgoal, dep])
- consider(dep)
-
- for goal in goals:
- consider(goal)
-
- #sort
plan = []
+ goal_walker = goals[::-1]
+ flattened_goals = []
+ for base_goal in goals[::-1]:
+ goal_walker = [base_goal]
+ dep_walker = [iter(self.tasks[base_goal.lstrip('?')][1])]
+ while goal_walker:
+ for subgoal in dep_walker[-1]:
+ break
+ else:
+ # all dependencies are in flattened_goals. record
+ # this goal.
+ dep_walker.pop()
+ goal = goal_walker.pop()
+ if goal not in flattened_goals:
+ flattened_goals.append(goal)
+ continue
+ if subgoal in goal_walker:
+ raise RuntimeException('circular dependency')
- while True:
- cands = dict.fromkeys([constr[0] for constr in constraints if constr])
- if not cands:
- break
+ # subgoal must be at least as optional as its parent
+ qs = goal_walker[-1].count('?')
+ if subgoal.count('?') < qs:
+ subgoal = '?' * qs + subgoal.lstrip('?')
- for cand in cands:
- for constr in constraints:
- if cand in constr[1:]:
- break
- else:
- break
- else:
- raise RuntimeError("circular dependecy")
+ # we'll add this goal once we have its dependencies.
+ goal_walker.append(subgoal)
+ dep_walker.append(iter(self.tasks[subgoal.lstrip('?')][1]))
- plan.append(cand)
- for constr in constraints:
- if constr and constr[0] == cand:
- del constr[0]
-
- plan.reverse()
-
+ plan = []
+ for name in flattened_goals:
+ name = name.lstrip('?')
+ if name in plan:
+ continue
+ will_run = name in flattened_goals or (
+ '?' + name in flattened_goals and name not in skip)
+ if will_run:
+ plan.append(name)
self._plan_cache[key] = plan
-
return plan
def _depending_on(self, goal):
diff --git a/rpython/translator/tool/test/test_taskengine.py b/rpython/translator/tool/test/test_taskengine.py
--- a/rpython/translator/tool/test/test_taskengine.py
+++ b/rpython/translator/tool/test/test_taskengine.py
@@ -148,3 +148,29 @@
assert drv._plan(['D', 'T', 'R']) == ['A', 'R', 'b', 'H', 'T', 'B', 'D']
assert drv._plan(['D', 'T']) == ['A', 'R', 'b', 'H', 'T', 'B', 'D']
assert drv._plan(['D', 'T'], skip=['B']) == ['A', 'R', 'b', 'H', 'T', 'D']
+
+
+def test_can_be_optional():
+ class Drv(SimpleTaskEngine):
+ def task_A():
+ pass
+
+ def task_B():
+ pass
+
+ task_B.task_deps = ['??A']
+
+ def task_C():
+ pass
+
+ task_C.task_deps = ['??B']
+
+ def task_D():
+ pass
+
+ task_D.task_deps = ['B', 'C']
+
+ drv = Drv()
+ assert drv._plan(['D']) == ['B', 'C', 'D']
+ assert drv._plan(['B', 'D']) == ['B', 'C', 'D']
+ assert drv._plan(['A', 'D']) == ['A', 'B', 'C', 'D']
From pypy.commits at gmail.com Mon May 9 13:09:06 2016
From: pypy.commits at gmail.com (william_ml_leslie)
Date: Mon, 09 May 2016 10:09:06 -0700 (PDT)
Subject: [pypy-commit] pypy taskengine-sorted-optionals: Use toposort +
lattice
Message-ID: <5730c432.06921c0a.1e1d5.ffff8d3e@mx.google.com>
Author: William ML Leslie
Branch: taskengine-sorted-optionals
Changeset: r84338:76a012472eda
Date: 2016-05-10 03:08 +1000
http://bitbucket.org/pypy/pypy/changeset/76a012472eda/
Log: Use toposort + lattice
diff --git a/rpython/translator/tool/taskengine.py b/rpython/translator/tool/taskengine.py
--- a/rpython/translator/tool/taskengine.py
+++ b/rpython/translator/tool/taskengine.py
@@ -22,44 +22,57 @@
except KeyError:
pass
- plan = []
- goal_walker = goals[::-1]
- flattened_goals = []
- for base_goal in goals[::-1]:
- goal_walker = [base_goal]
- dep_walker = [iter(self.tasks[base_goal.lstrip('?')][1])]
- while goal_walker:
- for subgoal in dep_walker[-1]:
- break
- else:
- # all dependencies are in flattened_goals. record
- # this goal.
- dep_walker.pop()
- goal = goal_walker.pop()
- if goal not in flattened_goals:
- flattened_goals.append(goal)
- continue
- if subgoal in goal_walker:
- raise RuntimeException('circular dependency')
+ optionality = dict((goal.lstrip('?'), goal.count('?'))
+ for goal in goals)
+ task_deps = {}
- # subgoal must be at least as optional as its parent
- qs = goal_walker[-1].count('?')
- if subgoal.count('?') < qs:
- subgoal = '?' * qs + subgoal.lstrip('?')
+ def will_do(task):
+ priority = optionality[task]
+ if priority < 1:
+ return True
+ return priority == 1 and task not in skip
- # we'll add this goal once we have its dependencies.
- goal_walker.append(subgoal)
- dep_walker.append(iter(self.tasks[subgoal.lstrip('?')][1]))
+ goal_walker = list(goals[::-1])
+ while goal_walker:
+ goal = goal_walker.pop()
+ qs = optionality.get(goal, 0)
+ if goal not in task_deps:
+ task_deps[goal] = deps = set()
+ for dep in self.tasks[goal][1]:
+ deps.add(dep.lstrip('?'))
+ for dep in self.tasks[goal][1]:
+ depname = dep.lstrip('?')
+ def_optionality = optionality.get(depname, 5)
+ dep_qs = max(qs, dep.count('?'))
+ if dep_qs < def_optionality:
+ optionality[depname] = dep_qs
+ goal_walker.append(depname)
+
+ for task, deps in list(task_deps.iteritems()):
+ if not will_do(task):
+ del task_deps[task]
+ else:
+ if task in deps:
+ deps.remove(task)
+ for dep in list(deps):
+ if not will_do(dep):
+ deps.remove(dep)
plan = []
- for name in flattened_goals:
- name = name.lstrip('?')
- if name in plan:
- continue
- will_run = name in flattened_goals or (
- '?' + name in flattened_goals and name not in skip)
- if will_run:
- plan.append(name)
+ seen = set()
+ tasks = list(task_deps)
+ while tasks:
+ remaining = []
+ for task in tasks:
+ if task_deps[task] - seen:
+ remaining.append(task)
+ else:
+ plan.append(task)
+ seen.add(task)
+ if len(remaining) == len(tasks):
+ raise RuntimeException('circular dependency')
+ tasks = remaining
+
self._plan_cache[key] = plan
return plan
From pypy.commits at gmail.com Mon May 9 14:46:19 2016
From: pypy.commits at gmail.com (mattip)
Date: Mon, 09 May 2016 11:46:19 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-macros-cast: merge default into branch
Message-ID: <5730dafb.01341c0a.82308.ffff9e7b@mx.google.com>
Author: Matti Picus
Branch: cpyext-macros-cast
Changeset: r84339:664e7d4392f4
Date: 2016-05-09 21:42 +0300
http://bitbucket.org/pypy/pypy/changeset/664e7d4392f4/
Log: merge default into branch
diff too long, truncating to 2000 out of 3197 lines
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -397,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
@@ -993,6 +998,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -33,26 +33,25 @@
it from a finalizer. A finalizer runs earlier, and in topological
order; care must be taken that the object might still be reachable at
this point if we're clever enough. A destructor on the other hand runs
-last; nothing can be done with the object any more.
+last; nothing can be done with the object any more, and the GC frees it
+immediately.
Destructors
-----------
A destructor is an RPython ``__del__()`` method that is called directly
-by the GC when there is no more reference to an object. Intended for
-objects that just need to free a block of raw memory or close a file.
+by the GC when it is about to free the memory. Intended for objects
+that just need to free an extra block of raw memory.
There are restrictions on the kind of code you can put in ``__del__()``,
including all other functions called by it. These restrictions are
-checked. In particular you cannot access fields containing GC objects;
-and if you call an external C function, it must be a "safe" function
-(e.g. not releasing the GIL; use ``releasegil=False`` in
-``rffi.llexternal()``).
+checked. In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
-If there are several objects with destructors that die during the same
-GC cycle, they are called in a completely random order --- but that
-should not matter because destructors cannot do much anyway.
+Destructors are called precisely when the GC frees the memory of the
+object. As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
Register_finalizer
@@ -95,10 +94,15 @@
To find the queued items, call ``fin.next_dead()`` repeatedly. It
returns the next queued item, or ``None`` when the queue is empty.
-It is allowed in theory to cumulate several different
+In theory, it would kind of work if you cumulate several different
``FinalizerQueue`` instances for objects of the same class, and
(always in theory) the same ``obj`` could be registered several times
in the same queue, or in several queues. This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
+
+Note that the Boehm garbage collector, used in ``rpython -O0``,
+completely ignores ``register_finalizer()``.
Ordering of finalizers
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -79,3 +79,13 @@
It is a more flexible way to make RPython finalizers.
.. branch: unpacking-cpython-shortcut
+
+.. branch: cleanups
+
+.. branch: cpyext-more-slots
+
+.. branch: use-gc-del-3
+
+Use the new rgc.FinalizerQueue mechanism to clean up the handling of
+``__del__`` methods. Fixes notably issue #2287. (All RPython
+subclasses of W_Root need to use FinalizerQueue now.)
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -11,7 +11,7 @@
INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
- UserDelAction)
+ make_finalizer_queue)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
@@ -28,6 +28,7 @@
"""This is the abstract root class of all wrapped objects that live
in a 'normal' object space like StdObjSpace."""
__slots__ = ('__weakref__',)
+ _must_be_light_finalizer_ = True
user_overridden_class = False
def getdict(self, space):
@@ -136,9 +137,8 @@
pass
def clear_all_weakrefs(self):
- """Call this at the beginning of interp-level __del__() methods
- in subclasses. It ensures that weakrefs (if any) are cleared
- before the object is further destroyed.
+ """Ensures that weakrefs (if any) are cleared now. This is
+ called by UserDelAction before the object is finalized further.
"""
lifeline = self.getweakref()
if lifeline is not None:
@@ -151,25 +151,37 @@
self.delweakref()
lifeline.clear_all_weakrefs()
- __already_enqueued_for_destruction = ()
+ def _finalize_(self):
+ """The RPython-level finalizer.
- def enqueue_for_destruction(self, space, callback, descrname):
- """Put the object in the destructor queue of the space.
- At a later, safe point in time, UserDelAction will call
- callback(self). If that raises OperationError, prints it
- to stderr with the descrname string.
+ By default, it is *not called*. See self.register_finalizer().
+ Be ready to handle the case where the object is only half
+ initialized. Also, in some cases the object might still be
+ visible to app-level after _finalize_() is called (e.g. if
+ there is a __del__ that resurrects).
+ """
- Note that 'callback' will usually need to start with:
- assert isinstance(self, W_SpecificClass)
+ def register_finalizer(self, space):
+ """Register a finalizer for this object, so that
+ self._finalize_() will be called. You must call this method at
+ most once. Be ready to handle in _finalize_() the case where
+ the object is half-initialized, even if you only call
+ self.register_finalizer() at the end of the initialization.
+ This is because there are cases where the finalizer is already
+ registered before: if the user makes an app-level subclass with
+ a __del__. (In that case only, self.register_finalizer() does
+ nothing, because the finalizer is already registered in
+ allocate_instance().)
"""
- # this function always resurect the object, so when
- # running on top of CPython we must manually ensure that
- # we enqueue it only once
- if not we_are_translated():
- if callback in self.__already_enqueued_for_destruction:
- return
- self.__already_enqueued_for_destruction += (callback,)
- space.user_del_action.register_callback(self, callback, descrname)
+ if self.user_overridden_class and self.getclass(space).hasuserdel:
+ # already registered by space.allocate_instance()
+ if not we_are_translated():
+ assert space.finalizer_queue._already_registered(self)
+ else:
+ if not we_are_translated():
+ # does not make sense if _finalize_ is not overridden
+ assert self._finalize_.im_func is not W_Root._finalize_.im_func
+ space.finalizer_queue.register_finalizer(self)
# hooks that the mapdict implementations needs:
def _get_mapdict_map(self):
@@ -389,9 +401,9 @@
self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
- self.user_del_action = UserDelAction(self)
+ make_finalizer_queue(W_Root, self)
self._code_of_sys_exc_info = None
-
+
# can be overridden to a subclass
self.initialize()
@@ -1844,7 +1856,6 @@
('get', 'get', 3, ['__get__']),
('set', 'set', 3, ['__set__']),
('delete', 'delete', 2, ['__delete__']),
- ('userdel', 'del', 1, ['__del__']),
]
ObjSpace.BuiltinModuleTable = [
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -2,7 +2,7 @@
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import specialize
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
TICK_COUNTER_STEP = 100
@@ -141,6 +141,12 @@
actionflag.action_dispatcher(self, frame) # slow path
bytecode_trace._always_inline_ = True
+ def _run_finalizers_now(self):
+ # Tests only: run the actions now, to ensure that the
+ # finalizable objects are really finalized. Used notably by
+ # pypy.tool.pytest.apptest.
+ self.space.actionflag.action_dispatcher(self, None)
+
def bytecode_only_trace(self, frame):
"""
Like bytecode_trace() but doesn't invoke any other events besides the
@@ -515,75 +521,98 @@
"""
-class UserDelCallback(object):
- def __init__(self, w_obj, callback, descrname):
- self.w_obj = w_obj
- self.callback = callback
- self.descrname = descrname
- self.next = None
-
class UserDelAction(AsyncAction):
"""An action that invokes all pending app-level __del__() method.
This is done as an action instead of immediately when the
- interp-level __del__() is invoked, because the latter can occur more
+ WRootFinalizerQueue is triggered, because the latter can occur more
or less anywhere in the middle of code that might not be happy with
random app-level code mutating data structures under its feet.
"""
def __init__(self, space):
AsyncAction.__init__(self, space)
- self.dying_objects = None
- self.dying_objects_last = None
- self.finalizers_lock_count = 0
- self.enabled_at_app_level = True
-
- def register_callback(self, w_obj, callback, descrname):
- cb = UserDelCallback(w_obj, callback, descrname)
- if self.dying_objects_last is None:
- self.dying_objects = cb
- else:
- self.dying_objects_last.next = cb
- self.dying_objects_last = cb
- self.fire()
+ self.finalizers_lock_count = 0 # see pypy/module/gc
+ self.enabled_at_app_level = True # see pypy/module/gc
+ self.pending_with_disabled_del = None
def perform(self, executioncontext, frame):
- if self.finalizers_lock_count > 0:
- return
self._run_finalizers()
+ @jit.dont_look_inside
def _run_finalizers(self):
- # Each call to perform() first grabs the self.dying_objects
- # and replaces it with an empty list. We do this to try to
- # avoid too deep recursions of the kind of __del__ being called
- # while in the middle of another __del__ call.
- pending = self.dying_objects
- self.dying_objects = None
- self.dying_objects_last = None
+ while True:
+ w_obj = self.space.finalizer_queue.next_dead()
+ if w_obj is None:
+ break
+ self._call_finalizer(w_obj)
+
+ def gc_disabled(self, w_obj):
+ # If we're running in 'gc.disable()' mode, record w_obj in the
+ # "call me later" list and return True. In normal mode, return
+ # False. Use this function from some _finalize_() methods:
+ # if a _finalize_() method would call some user-defined
+ # app-level function, like a weakref callback, then first do
+ # 'if gc.disabled(self): return'. Another attempt at
+ # calling _finalize_() will be made after 'gc.enable()'.
+ # (The exact rule for when to use gc_disabled() or not is a bit
+ # vague, but most importantly this includes all user-level
+ # __del__().)
+ pdd = self.pending_with_disabled_del
+ if pdd is None:
+ return False
+ else:
+ pdd.append(w_obj)
+ return True
+
+ def _call_finalizer(self, w_obj):
+ # Before calling the finalizers, clear the weakrefs, if any.
+ w_obj.clear_all_weakrefs()
+
+ # Look up and call the app-level __del__, if any.
space = self.space
- while pending is not None:
+ if w_obj.typedef is None:
+ w_del = None # obscure case: for WeakrefLifeline
+ else:
+ w_del = space.lookup(w_obj, '__del__')
+ if w_del is not None:
+ if self.gc_disabled(w_obj):
+ return
try:
- pending.callback(pending.w_obj)
- except OperationError as e:
- e.write_unraisable(space, pending.descrname, pending.w_obj)
- e.clear(space) # break up reference cycles
- pending = pending.next
- #
- # Note: 'dying_objects' used to be just a regular list instead
- # of a chained list. This was the cause of "leaks" if we have a
- # program that constantly creates new objects with finalizers.
- # Here is why: say 'dying_objects' is a long list, and there
- # are n instances in it. Then we spend some time in this
- # function, possibly triggering more GCs, but keeping the list
- # of length n alive. Then the list is suddenly freed at the
- # end, and we return to the user program. At this point the
- # GC limit is still very high, because just before, there was
- # a list of length n alive. Assume that the program continues
- # to allocate a lot of instances with finalizers. The high GC
- # limit means that it could allocate a lot of instances before
- # reaching it --- possibly more than n. So the whole procedure
- # repeats with higher and higher values of n.
- #
- # This does not occur in the current implementation because
- # there is no list of length n: if n is large, then the GC
- # will run several times while walking the list, but it will
- # see lower and lower memory usage, with no lower bound of n.
+ space.get_and_call_function(w_del, w_obj)
+ except Exception as e:
+ report_error(space, e, "method __del__ of ", w_obj)
+
+ # Call the RPython-level _finalize_() method.
+ try:
+ w_obj._finalize_()
+ except Exception as e:
+ report_error(space, e, "finalizer of ", w_obj)
+
+
+def report_error(space, e, where, w_obj):
+ if isinstance(e, OperationError):
+ e.write_unraisable(space, where, w_obj)
+ e.clear(space) # break up reference cycles
+ else:
+ addrstring = w_obj.getaddrstring(space)
+ msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % (
+ str(e), where, space.type(w_obj).name, addrstring))
+ space.call_method(space.sys.get('stderr'), 'write',
+ space.wrap(msg))
+
+
+def make_finalizer_queue(W_Root, space):
+ """Make a FinalizerQueue subclass which responds to GC finalizer
+ events by 'firing' the UserDelAction class above. It does not
+ directly fetch the objects to finalize at all; they stay in the
+ GC-managed queue, and will only be fetched by UserDelAction
+ (between bytecodes)."""
+
+ class WRootFinalizerQueue(rgc.FinalizerQueue):
+ Class = W_Root
+
+ def finalizer_trigger(self):
+ space.user_del_action.fire()
+
+ space.user_del_action = UserDelAction(space)
+ space.finalizer_queue = WRootFinalizerQueue()
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,6 +1,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.pyopcode import LoopBlock
+from pypy.interpreter.pycode import CO_YIELD_INSIDE_TRY
from rpython.rlib import jit
@@ -13,6 +14,8 @@
self.frame = frame # turned into None when frame_finished_execution
self.pycode = frame.pycode
self.running = False
+ if self.pycode.co_flags & CO_YIELD_INSIDE_TRY:
+ self.register_finalizer(self.space)
def descr__repr__(self, space):
if self.pycode is None:
@@ -139,7 +142,6 @@
def descr_close(self):
"""x.close(arg) -> raise GeneratorExit inside generator."""
- assert isinstance(self, GeneratorIterator)
space = self.space
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
@@ -212,25 +214,21 @@
unpack_into = _create_unpack_into()
unpack_into_w = _create_unpack_into()
-
-class GeneratorIteratorWithDel(GeneratorIterator):
-
- def __del__(self):
- # Only bother enqueuing self to raise an exception if the frame is
- # still not finished and finally or except blocks are present.
- self.clear_all_weakrefs()
+ def _finalize_(self):
+ # This is only called if the CO_YIELD_INSIDE_TRY flag is set
+ # on the code object. If the frame is still not finished and
+ # finally or except blocks are present at the current
+ # position, then raise a GeneratorExit. Otherwise, there is
+ # no point.
if self.frame is not None:
block = self.frame.lastblock
while block is not None:
if not isinstance(block, LoopBlock):
- self.enqueue_for_destruction(self.space,
- GeneratorIterator.descr_close,
- "interrupting generator of ")
+ self.descr_close()
break
block = block.previous
-
def get_printable_location_genentry(bytecode):
return '%s ' % (bytecode.get_repr(),)
generatorentry_driver = jit.JitDriver(greens=['pycode'],
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -241,12 +241,8 @@
def run(self):
"""Start this frame's execution."""
if self.getcode().co_flags & pycode.CO_GENERATOR:
- if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY:
- from pypy.interpreter.generator import GeneratorIteratorWithDel
- return self.space.wrap(GeneratorIteratorWithDel(self))
- else:
- from pypy.interpreter.generator import GeneratorIterator
- return self.space.wrap(GeneratorIterator(self))
+ from pypy.interpreter.generator import GeneratorIterator
+ return self.space.wrap(GeneratorIterator(self))
else:
return self.execute_frame()
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -127,10 +127,7 @@
""" % (slots, methodname, checks[0], checks[1],
checks[2], checks[3]))
subclasses = {}
- for key, subcls in typedef._subclass_cache.items():
- if key[0] is not space.config:
- continue
- cls = key[1]
+ for cls, subcls in typedef._unique_subclass_cache.items():
subclasses.setdefault(cls, {})
prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls)
assert subcls is prevsubcls
@@ -186,35 +183,20 @@
class W_Level1(W_Root):
def __init__(self, space1):
assert space1 is space
- def __del__(self):
+ self.register_finalizer(space)
+ def _finalize_(self):
space.call_method(w_seen, 'append', space.wrap(1))
- class W_Level2(W_Root):
- def __init__(self, space1):
- assert space1 is space
- def __del__(self):
- self.enqueue_for_destruction(space, W_Level2.destructormeth,
- 'FOO ')
- def destructormeth(self):
- space.call_method(w_seen, 'append', space.wrap(2))
W_Level1.typedef = typedef.TypeDef(
'level1',
__new__ = typedef.generic_new_descr(W_Level1))
- W_Level2.typedef = typedef.TypeDef(
- 'level2',
- __new__ = typedef.generic_new_descr(W_Level2))
#
w_seen = space.newlist([])
W_Level1(space)
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [1]
- #
- w_seen = space.newlist([])
- W_Level2(space)
- gc.collect(); gc.collect()
assert space.str_w(space.repr(w_seen)) == "[]" # not called yet
ec = space.getexecutioncontext()
self.space.user_del_action.perform(ec, None)
- assert space.unwrap(w_seen) == [2]
+ assert space.unwrap(w_seen) == [1] # called by user_del_action
#
w_seen = space.newlist([])
self.space.appexec([self.space.gettypeobject(W_Level1.typedef)],
@@ -236,29 +218,17 @@
A4()
""")
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [4, 1]
+ assert space.unwrap(w_seen) == [4, 1] # user __del__, and _finalize_
#
w_seen = space.newlist([])
- self.space.appexec([self.space.gettypeobject(W_Level2.typedef)],
+ self.space.appexec([self.space.gettypeobject(W_Level1.typedef)],
"""(level2):
class A5(level2):
pass
A5()
""")
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [2]
- #
- w_seen = space.newlist([])
- self.space.appexec([self.space.gettypeobject(W_Level2.typedef),
- w_seen],
- """(level2, seen):
- class A6(level2):
- def __del__(self):
- seen.append(6)
- A6()
- """)
- gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [6, 2]
+ assert space.unwrap(w_seen) == [1] # _finalize_ only
def test_multiple_inheritance(self):
class W_A(W_Root):
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -24,6 +24,8 @@
self.bases = bases
self.heaptype = False
self.hasdict = '__dict__' in rawdict
+ # no __del__: use an RPython _finalize_() method and register_finalizer
+ assert '__del__' not in rawdict
self.weakrefable = '__weakref__' in rawdict
self.doc = rawdict.pop('__doc__', None)
for base in bases:
@@ -103,26 +105,20 @@
# we need two subclasses of the app-level type, one to add mapdict, and then one
# to add del to not slow down the GC.
-def get_unique_interplevel_subclass(space, cls, needsdel=False):
+def get_unique_interplevel_subclass(space, cls):
"NOT_RPYTHON: initialization-time only"
- if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False):
- needsdel = False
assert cls.typedef.acceptable_as_base_class
- key = space, cls, needsdel
try:
- return _subclass_cache[key]
+ return _unique_subclass_cache[cls]
except KeyError:
- # XXX can save a class if cls already has a __del__
- if needsdel:
- cls = get_unique_interplevel_subclass(space, cls, False)
- subcls = _getusercls(space, cls, needsdel)
- assert key not in _subclass_cache
- _subclass_cache[key] = subcls
+ subcls = _getusercls(cls)
+ assert cls not in _unique_subclass_cache
+ _unique_subclass_cache[cls] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
-_subclass_cache = {}
+_unique_subclass_cache = {}
-def _getusercls(space, cls, wants_del, reallywantdict=False):
+def _getusercls(cls, reallywantdict=False):
from rpython.rlib import objectmodel
from pypy.objspace.std.objectobject import W_ObjectObject
from pypy.module.__builtin__.interp_classobj import W_InstanceObject
@@ -132,11 +128,10 @@
typedef = cls.typedef
name = cls.__name__ + "User"
- mixins_needed = []
if cls is W_ObjectObject or cls is W_InstanceObject:
- mixins_needed.append(_make_storage_mixin_size_n())
+ base_mixin = _make_storage_mixin_size_n()
else:
- mixins_needed.append(MapdictStorageMixin)
+ base_mixin = MapdictStorageMixin
copy_methods = [BaseUserClassMapdict]
if reallywantdict or not typedef.hasdict:
# the type has no dict, mapdict to provide the dict
@@ -147,44 +142,12 @@
# support
copy_methods.append(MapdictWeakrefSupport)
name += "Weakrefable"
- if wants_del:
- # This subclass comes with an app-level __del__. To handle
- # it, we make an RPython-level __del__ method. This
- # RPython-level method is called directly by the GC and it
- # cannot do random things (calling the app-level __del__ would
- # be "random things"). So instead, we just call here
- # enqueue_for_destruction(), and the app-level __del__ will be
- # called later at a safe point (typically between bytecodes).
- # If there is also an inherited RPython-level __del__, it is
- # called afterwards---not immediately! This base
- # RPython-level __del__ is supposed to run only when the
- # object is not reachable any more. NOTE: it doesn't fully
- # work: see issue #2287.
- name += "Del"
- parent_destructor = getattr(cls, '__del__', None)
- def call_parent_del(self):
- assert isinstance(self, subcls)
- parent_destructor(self)
- def call_applevel_del(self):
- assert isinstance(self, subcls)
- space.userdel(self)
- class Proto(object):
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(space, call_applevel_del,
- 'method __del__ of ')
- if parent_destructor is not None:
- self.enqueue_for_destruction(space, call_parent_del,
- 'internal destructor of ')
- mixins_needed.append(Proto)
class subcls(cls):
user_overridden_class = True
- for base in mixins_needed:
- objectmodel.import_from_mixin(base)
+ objectmodel.import_from_mixin(base_mixin)
for copycls in copy_methods:
_copy_methods(copycls, subcls)
- del subcls.base
subcls.__name__ = name
return subcls
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -44,13 +44,12 @@
self.bases_w = bases
self.w_dict = w_dict
+ def has_user_del(self, space):
+ return self.lookup(space, '__del__') is not None
+
def instantiate(self, space):
cache = space.fromcache(Cache)
- if self.lookup(space, '__del__') is not None:
- w_inst = cache.cls_with_del(space, self)
- else:
- w_inst = cache.cls_without_del(space, self)
- return w_inst
+ return cache.InstanceObjectCls(space, self)
def getdict(self, space):
return self.w_dict
@@ -132,9 +131,9 @@
self.setbases(space, w_value)
return
elif name == "__del__":
- if self.lookup(space, name) is None:
+ if not self.has_user_del(space):
msg = ("a __del__ method added to an existing class will "
- "not be called")
+ "only be called on instances made from now on")
space.warn(space.wrap(msg), space.w_RuntimeWarning)
space.setitem(self.w_dict, w_attr, w_value)
@@ -184,14 +183,11 @@
if hasattr(space, 'is_fake_objspace'):
# hack: with the fake objspace, we don't want to see typedef's
# _getusercls() at all
- self.cls_without_del = W_InstanceObject
- self.cls_with_del = W_InstanceObject
+ self.InstanceObjectCls = W_InstanceObject
return
- self.cls_without_del = _getusercls(
- space, W_InstanceObject, False, reallywantdict=True)
- self.cls_with_del = _getusercls(
- space, W_InstanceObject, True, reallywantdict=True)
+ self.InstanceObjectCls = _getusercls(
+ W_InstanceObject, reallywantdict=True)
def class_descr_call(space, w_self, __args__):
@@ -297,12 +293,15 @@
class W_InstanceObject(W_Root):
def __init__(self, space, w_class):
# note that user_setup is overridden by the typedef.py machinery
+ self.space = space
self.user_setup(space, space.gettypeobject(self.typedef))
assert isinstance(w_class, W_ClassObject)
self.w_class = w_class
+ if w_class.has_user_del(space):
+ space.finalizer_queue.register_finalizer(self)
def user_setup(self, space, w_subtype):
- self.space = space
+ pass
def set_oldstyle_class(self, space, w_class):
if w_class is None or not isinstance(w_class, W_ClassObject):
@@ -368,8 +367,7 @@
self.set_oldstyle_class(space, w_value)
return
if name == '__del__' and w_meth is None:
- cache = space.fromcache(Cache)
- if (not isinstance(self, cache.cls_with_del)
+ if (not self.w_class.has_user_del(space)
and self.getdictvalue(space, '__del__') is None):
msg = ("a __del__ method added to an instance with no "
"__del__ in the class will not be called")
@@ -646,13 +644,14 @@
raise oefmt(space.w_TypeError, "instance has no next() method")
return space.call_function(w_func)
- def descr_del(self, space):
- # Note that this is called from executioncontext.UserDelAction
- # via the space.userdel() method.
+ def _finalize_(self):
+ space = self.space
w_func = self.getdictvalue(space, '__del__')
if w_func is None:
w_func = self.getattr_from_class(space, '__del__')
if w_func is not None:
+ if self.space.user_del_action.gc_disabled(self):
+ return
space.call_function(w_func)
def descr_exit(self, space, w_type, w_value, w_tb):
@@ -729,7 +728,6 @@
__pow__ = interp2app(W_InstanceObject.descr_pow),
__rpow__ = interp2app(W_InstanceObject.descr_rpow),
next = interp2app(W_InstanceObject.descr_next),
- __del__ = interp2app(W_InstanceObject.descr_del),
__exit__ = interp2app(W_InstanceObject.descr_exit),
__dict__ = dict_descr,
**rawdict
diff --git a/pypy/module/_cffi_backend/allocator.py b/pypy/module/_cffi_backend/allocator.py
--- a/pypy/module/_cffi_backend/allocator.py
+++ b/pypy/module/_cffi_backend/allocator.py
@@ -45,14 +45,11 @@
rffi.c_memset(rffi.cast(rffi.VOIDP, ptr), 0,
rffi.cast(rffi.SIZE_T, datasize))
#
- if self.w_free is None:
- # use this class which does not have a __del__, but still
- # keeps alive w_raw_cdata
- res = cdataobj.W_CDataNewNonStdNoFree(space, ptr, ctype, length)
- else:
- res = cdataobj.W_CDataNewNonStdFree(space, ptr, ctype, length)
+ res = cdataobj.W_CDataNewNonStd(space, ptr, ctype, length)
+ res.w_raw_cdata = w_raw_cdata
+ if self.w_free is not None:
res.w_free = self.w_free
- res.w_raw_cdata = w_raw_cdata
+ res.register_finalizer(space)
return res
@unwrap_spec(w_init=WrappedDefault(None))
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -71,7 +71,7 @@
def nonzero(self):
with self as ptr:
- nonzero = bool(ptr)
+ nonzero = self.ctype.nonzero(ptr)
return self.space.wrap(nonzero)
def int(self, space):
@@ -365,8 +365,16 @@
return self.ctype.size
def with_gc(self, w_destructor):
+ space = self.space
+ if space.is_none(w_destructor):
+ if isinstance(self, W_CDataGCP):
+ self.w_destructor = None
+ return space.w_None
+ raise oefmt(space.w_TypeError,
+ "Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
with self as ptr:
- return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor)
+ return W_CDataGCP(space, ptr, self.ctype, self, w_destructor)
def unpack(self, length):
from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray
@@ -441,22 +449,11 @@
lltype.free(self._ptr, flavor='raw')
-class W_CDataNewNonStdNoFree(W_CDataNewOwning):
- """Subclass using a non-standard allocator, no free()"""
- _attrs_ = ['w_raw_cdata']
+class W_CDataNewNonStd(W_CDataNewOwning):
+ """Subclass using a non-standard allocator"""
+ _attrs_ = ['w_raw_cdata', 'w_free']
-class W_CDataNewNonStdFree(W_CDataNewNonStdNoFree):
- """Subclass using a non-standard allocator, with a free()"""
- _attrs_ = ['w_free']
-
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space,
- W_CDataNewNonStdFree.call_destructor,
- 'destructor of ')
-
- def call_destructor(self):
- assert isinstance(self, W_CDataNewNonStdFree)
+ def _finalize_(self):
self.space.call_function(self.w_free, self.w_raw_cdata)
@@ -538,21 +535,19 @@
class W_CDataGCP(W_CData):
"""For ffi.gc()."""
_attrs_ = ['w_original_cdata', 'w_destructor']
- _immutable_fields_ = ['w_original_cdata', 'w_destructor']
+ _immutable_fields_ = ['w_original_cdata']
def __init__(self, space, cdata, ctype, w_original_cdata, w_destructor):
W_CData.__init__(self, space, cdata, ctype)
self.w_original_cdata = w_original_cdata
self.w_destructor = w_destructor
+ self.register_finalizer(space)
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space, W_CDataGCP.call_destructor,
- 'destructor of ')
-
- def call_destructor(self):
- assert isinstance(self, W_CDataGCP)
- self.space.call_function(self.w_destructor, self.w_original_cdata)
+ def _finalize_(self):
+ w_destructor = self.w_destructor
+ if w_destructor is not None:
+ self.w_destructor = None
+ self.space.call_function(w_destructor, self.w_original_cdata)
W_CData.typedef = TypeDef(
diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py
--- a/pypy/module/_cffi_backend/cdlopen.py
+++ b/pypy/module/_cffi_backend/cdlopen.py
@@ -25,10 +25,13 @@
raise wrap_dlopenerror(ffi.space, e, filename)
W_LibObject.__init__(self, ffi, filename)
self.libhandle = handle
+ self.register_finalizer(ffi.space)
- def __del__(self):
- if self.libhandle:
- dlclose(self.libhandle)
+ def _finalize_(self):
+ h = self.libhandle
+ if h != rffi.cast(DLLHANDLE, 0):
+ self.libhandle = rffi.cast(DLLHANDLE, 0)
+ dlclose(h)
def cdlopen_fetch(self, name):
if not self.libhandle:
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -147,6 +147,9 @@
raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number",
self.name)
+ def nonzero(self, cdata):
+ return bool(cdata)
+
def insert_name(self, extra, extra_position):
name = '%s%s%s' % (self.name[:self.name_position],
extra,
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -93,6 +93,18 @@
return self.space.newlist_int(result)
return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length)
+ def nonzero(self, cdata):
+ if self.size <= rffi.sizeof(lltype.Signed):
+ value = misc.read_raw_long_data(cdata, self.size)
+ return value != 0
+ else:
+ return self._nonzero_longlong(cdata)
+
+ def _nonzero_longlong(self, cdata):
+ # in its own function: LONGLONG may make the whole function jit-opaque
+ value = misc.read_raw_signed_data(cdata, self.size)
+ return bool(value)
+
class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive):
_attrs_ = []
@@ -435,6 +447,9 @@
return self.space.newlist_float(result)
return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length)
+ def nonzero(self, cdata):
+ return misc.is_nonnull_float(cdata, self.size)
+
class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat):
_attrs_ = []
@@ -501,3 +516,7 @@
rffi.LONGDOUBLE, rffi.LONGDOUBLEP)
return True
return W_CTypePrimitive.pack_list_of_items(self, cdata, w_ob)
+
+ @jit.dont_look_inside
+ def nonzero(self, cdata):
+ return misc.is_nonnull_longdouble(cdata)
diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py
--- a/pypy/module/_cffi_backend/libraryobj.py
+++ b/pypy/module/_cffi_backend/libraryobj.py
@@ -15,7 +15,6 @@
class W_Library(W_Root):
_immutable_ = True
- handle = rffi.cast(DLLHANDLE, 0)
def __init__(self, space, filename, flags):
self.space = space
@@ -27,8 +26,9 @@
except DLOpenError as e:
raise wrap_dlopenerror(space, e, filename)
self.name = filename
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
h = self.handle
if h != rffi.cast(DLLHANDLE, 0):
self.handle = rffi.cast(DLLHANDLE, 0)
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -256,7 +256,7 @@
def is_nonnull_longdouble(cdata):
return _is_nonnull_longdouble(read_raw_longdouble_data(cdata))
def is_nonnull_float(cdata, size):
- return read_raw_float_data(cdata, size) != 0.0
+ return read_raw_float_data(cdata, size) != 0.0 # note: True if a NaN
def object_as_bool(space, w_ob):
# convert and cast a Python object to a boolean. Accept an integer
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -141,9 +141,13 @@
INF = 1E200 * 1E200
for name in ["float", "double"]:
p = new_primitive_type(name)
- assert bool(cast(p, 0))
+ assert bool(cast(p, 0)) is False # since 1.7
+ assert bool(cast(p, -0.0)) is False # since 1.7
+ assert bool(cast(p, 1e-42)) is True
+ assert bool(cast(p, -1e-42)) is True
assert bool(cast(p, INF))
assert bool(cast(p, -INF))
+ assert bool(cast(p, float("nan")))
assert int(cast(p, -150)) == -150
assert int(cast(p, 61.91)) == 61
assert long(cast(p, 61.91)) == 61
@@ -202,7 +206,8 @@
def test_character_type():
p = new_primitive_type("char")
- assert bool(cast(p, '\x00'))
+ assert bool(cast(p, 'A')) is True
+ assert bool(cast(p, '\x00')) is False # since 1.7
assert cast(p, '\x00') != cast(p, -17*256)
assert int(cast(p, 'A')) == 65
assert long(cast(p, 'A')) == 65
@@ -2558,7 +2563,8 @@
BBoolP = new_pointer_type(BBool)
assert int(cast(BBool, False)) == 0
assert int(cast(BBool, True)) == 1
- assert bool(cast(BBool, False)) is True # warning!
+ assert bool(cast(BBool, False)) is False # since 1.7
+ assert bool(cast(BBool, True)) is True
assert int(cast(BBool, 3)) == 1
assert int(cast(BBool, long(3))) == 1
assert int(cast(BBool, long(10)**4000)) == 1
diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py
--- a/pypy/module/_cffi_backend/test/test_ffi_obj.py
+++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py
@@ -331,6 +331,25 @@
gc.collect()
assert seen == [1]
+ def test_ffi_gc_disable(self):
+ import _cffi_backend as _cffi1_backend
+ ffi = _cffi1_backend.FFI()
+ p = ffi.new("int *", 123)
+ raises(TypeError, ffi.gc, p, None)
+ seen = []
+ q1 = ffi.gc(p, lambda p: seen.append(1))
+ q2 = ffi.gc(q1, lambda p: seen.append(2))
+ import gc; gc.collect()
+ assert seen == []
+ assert ffi.gc(q1, None) is None
+ del q1, q2
+ for i in range(5):
+ if seen:
+ break
+ import gc
+ gc.collect()
+ assert seen == [2]
+
def test_ffi_new_allocator_1(self):
import _cffi_backend as _cffi1_backend
ffi = _cffi1_backend.FFI()
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -43,22 +43,18 @@
def __init__(self, space):
self.space = space
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
# assume that the file and stream objects are only visible in the
- # thread that runs __del__, so no race condition should be possible
- self.clear_all_weakrefs()
+ # thread that runs _finalize_, so no race condition should be
+ # possible and no locking is done here.
if self.stream is not None:
- self.enqueue_for_destruction(self.space, W_File.destructor,
- 'close() method of ')
-
- def destructor(self):
- assert isinstance(self, W_File)
- try:
- self.direct_close()
- except StreamErrors as e:
- operr = wrap_streamerror(self.space, e, self.w_name)
- raise operr
+ try:
+ self.direct_close()
+ except StreamErrors as e:
+ operr = wrap_streamerror(self.space, e, self.w_name)
+ raise operr
def fdopenstream(self, stream, fd, mode, w_name=None):
self.fd = fd
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -76,11 +76,14 @@
except:
lltype.free(ctx, flavor='raw')
raise
+ self.register_finalizer(space)
- def __del__(self):
- if self.ctx:
- ropenssl.EVP_MD_CTX_cleanup(self.ctx)
- lltype.free(self.ctx, flavor='raw')
+ def _finalize_(self):
+ ctx = self.ctx
+ if ctx:
+ self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO)
+ ropenssl.EVP_MD_CTX_cleanup(ctx)
+ lltype.free(ctx, flavor='raw')
def digest_type_by_name(self, space):
digest_type = ropenssl.EVP_get_digestbyname(self.name)
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -952,9 +952,15 @@
self.w_writer = None
raise
- def __del__(self):
- self.clear_all_weakrefs()
+ def _finalize_(self):
# Don't call the base __del__: do not close the files!
+ # Usually the _finalize_() method is not called at all because
+ # we set 'needs_to_finalize = False' in this class, so
+ # W_IOBase.__init__() won't call register_finalizer().
+ # However, this method might still be called: if the user
+ # makes an app-level subclass and adds a custom __del__.
+ pass
+ needs_to_finalize = False
# forward to reader
for method in ['read', 'peek', 'read1', 'readinto', 'readable']:
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -59,6 +59,8 @@
self.__IOBase_closed = False
if add_to_autoflusher:
get_autoflusher(space).add(self)
+ if self.needs_to_finalize:
+ self.register_finalizer(space)
def getdict(self, space):
return self.w_dict
@@ -71,13 +73,7 @@
return True
return False
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space, W_IOBase.destructor,
- 'internal __del__ of ')
-
- def destructor(self):
- assert isinstance(self, W_IOBase)
+ def _finalize_(self):
space = self.space
w_closed = space.findattr(self, space.wrap('closed'))
try:
@@ -90,6 +86,7 @@
# equally as bad, and potentially more frequent (because of
# shutdown issues).
pass
+ needs_to_finalize = True
def _CLOSED(self):
# Use this macro whenever you want to check the internal `closed`
diff --git a/pypy/module/_multibytecodec/app_multibytecodec.py b/pypy/module/_multibytecodec/app_multibytecodec.py
--- a/pypy/module/_multibytecodec/app_multibytecodec.py
+++ b/pypy/module/_multibytecodec/app_multibytecodec.py
@@ -44,8 +44,10 @@
self, data))
def reset(self):
- self.stream.write(MultibyteIncrementalEncoder.encode(
- self, '', final=True))
+ data = MultibyteIncrementalEncoder.encode(
+ self, '', final=True)
+ if len(data) > 0:
+ self.stream.write(data)
MultibyteIncrementalEncoder.reset(self)
def writelines(self, lines):
diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py
--- a/pypy/module/_multibytecodec/interp_incremental.py
+++ b/pypy/module/_multibytecodec/interp_incremental.py
@@ -20,8 +20,9 @@
self.codec = codec.codec
self.name = codec.name
self._initialize()
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
self._free()
def reset_w(self):
diff --git a/pypy/module/_multibytecodec/test/test_app_stream.py b/pypy/module/_multibytecodec/test/test_app_stream.py
--- a/pypy/module/_multibytecodec/test/test_app_stream.py
+++ b/pypy/module/_multibytecodec/test/test_app_stream.py
@@ -90,3 +90,15 @@
w.write(u'\u304b')
w.write(u'\u309a')
assert w.stream.output == ['\x83m', '', '\x82\xf5']
+
+ def test_writer_seek_no_empty_write(self):
+ # issue #2293: codecs.py will sometimes issue a reset()
+ # on a StreamWriter attached to a file that is not opened
+ # for writing at all. We must not emit a "write('')"!
+ class FakeFile:
+ def write(self, data):
+ raise IOError("can't write!")
+ #
+ w = self.ShiftJisx0213StreamWriter(FakeFile())
+ w.reset()
+ # assert did not crash
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -40,14 +40,17 @@
BUFFER_SIZE = 1024
buffer = lltype.nullptr(rffi.CCHARP.TO)
- def __init__(self, flags):
+ def __init__(self, space, flags):
self.flags = flags
self.buffer = lltype.malloc(rffi.CCHARP.TO, self.BUFFER_SIZE,
flavor='raw')
+ self.register_finalizer(space)
- def __del__(self):
- if self.buffer:
- lltype.free(self.buffer, flavor='raw')
+ def _finalize_(self):
+ buf = self.buffer
+ if buf:
+ self.buffer = lltype.nullptr(rffi.CCHARP.TO)
+ lltype.free(buf, flavor='raw')
try:
self.do_close()
except OSError:
@@ -242,7 +245,7 @@
def __init__(self, space, fd, flags):
if fd == self.INVALID_HANDLE_VALUE or fd < 0:
raise oefmt(space.w_IOError, "invalid handle %d", fd)
- W_BaseConnection.__init__(self, flags)
+ W_BaseConnection.__init__(self, space, flags)
self.fd = fd
@unwrap_spec(fd=int, readable=bool, writable=bool)
@@ -363,8 +366,8 @@
if sys.platform == 'win32':
from rpython.rlib.rwin32 import INVALID_HANDLE_VALUE
- def __init__(self, handle, flags):
- W_BaseConnection.__init__(self, flags)
+ def __init__(self, space, handle, flags):
+ W_BaseConnection.__init__(self, space, flags)
self.handle = handle
@unwrap_spec(readable=bool, writable=bool)
@@ -375,7 +378,7 @@
flags = (readable and READABLE) | (writable and WRITABLE)
self = space.allocate_instance(W_PipeConnection, w_subtype)
- W_PipeConnection.__init__(self, handle, flags)
+ W_PipeConnection.__init__(self, space, handle, flags)
return space.wrap(self)
def descr_repr(self, space):
diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py
--- a/pypy/module/_multiprocessing/interp_semaphore.py
+++ b/pypy/module/_multiprocessing/interp_semaphore.py
@@ -430,11 +430,12 @@
class W_SemLock(W_Root):
- def __init__(self, handle, kind, maxvalue):
+ def __init__(self, space, handle, kind, maxvalue):
self.handle = handle
self.kind = kind
self.count = 0
self.maxvalue = maxvalue
+ self.register_finalizer(space)
def kind_get(self, space):
return space.newint(self.kind)
@@ -508,7 +509,7 @@
@unwrap_spec(kind=int, maxvalue=int)
def rebuild(space, w_cls, w_handle, kind, maxvalue):
self = space.allocate_instance(W_SemLock, w_cls)
- self.__init__(handle_w(space, w_handle), kind, maxvalue)
+ self.__init__(space, handle_w(space, w_handle), kind, maxvalue)
return space.wrap(self)
def enter(self, space):
@@ -517,7 +518,7 @@
def exit(self, space, __args__):
self.release(space)
- def __del__(self):
+ def _finalize_(self):
delete_semaphore(self.handle)
@unwrap_spec(kind=int, value=int, maxvalue=int)
@@ -534,7 +535,7 @@
raise wrap_oserror(space, e)
self = space.allocate_instance(W_SemLock, w_subtype)
- self.__init__(handle, kind, maxvalue)
+ self.__init__(space, handle, kind, maxvalue)
return space.wrap(self)
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py
--- a/pypy/module/_pickle_support/maker.py
+++ b/pypy/module/_pickle_support/maker.py
@@ -4,7 +4,7 @@
from pypy.interpreter.function import Function, Method
from pypy.interpreter.module import Module
from pypy.interpreter.pytraceback import PyTraceback
-from pypy.interpreter.generator import GeneratorIteratorWithDel
+from pypy.interpreter.generator import GeneratorIterator
from rpython.rlib.objectmodel import instantiate
from pypy.interpreter.gateway import unwrap_spec
from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject
@@ -59,7 +59,7 @@
return space.wrap(tb)
def generator_new(space):
- new_generator = instantiate(GeneratorIteratorWithDel)
+ new_generator = instantiate(GeneratorIterator)
return space.wrap(new_generator)
@unwrap_spec(current=int, remaining=int, step=int)
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -278,6 +278,8 @@
sock_fd = space.int_w(space.call_method(w_sock, "fileno"))
self.ssl = libssl_SSL_new(w_ctx.ctx) # new ssl struct
+ self.register_finalizer(space)
+
index = compute_unique_id(self)
libssl_SSL_set_app_data(self.ssl, rffi.cast(rffi.VOIDP, index))
SOCKET_STORAGE.set(index, self)
@@ -317,16 +319,15 @@
self.ssl_sock_weakref_w = None
return self
- def __del__(self):
- self.enqueue_for_destruction(self.space, _SSLSocket.destructor,
- '__del__() method of ')
-
- def destructor(self):
- assert isinstance(self, _SSLSocket)
- if self.peer_cert:
- libssl_X509_free(self.peer_cert)
- if self.ssl:
- libssl_SSL_free(self.ssl)
+ def _finalize_(self):
+ peer_cert = self.peer_cert
+ if peer_cert:
+ self.peer_cert = lltype.nullptr(X509.TO)
+ libssl_X509_free(peer_cert)
+ ssl = self.ssl
+ if ssl:
+ self.ssl = lltype.nullptr(SSL.TO)
+ libssl_SSL_free(ssl)
@unwrap_spec(data='bufferstr')
def write(self, space, data):
@@ -1285,6 +1286,7 @@
self = space.allocate_instance(_SSLContext, w_subtype)
self.ctx = ctx
self.check_hostname = False
+ self.register_finalizer(space)
options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
if protocol != PY_SSL_VERSION_SSL2:
options |= SSL_OP_NO_SSLv2
@@ -1308,8 +1310,11 @@
return self
- def __del__(self):
- libssl_SSL_CTX_free(self.ctx)
+ def _finalize_(self):
+ ctx = self.ctx
+ if ctx:
+ self.ctx = lltype.nullptr(SSL_CTX.TO)
+ libssl_SSL_CTX_free(ctx)
@unwrap_spec(server_side=int)
def descr_wrap_socket(self, space, w_sock, server_side, w_server_hostname=None, w_ssl_sock=None):
diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py
--- a/pypy/module/_weakref/interp__weakref.py
+++ b/pypy/module/_weakref/interp__weakref.py
@@ -3,7 +3,8 @@
from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import interp2app, ObjSpace
from pypy.interpreter.typedef import TypeDef
-from rpython.rlib import jit
+from pypy.interpreter.executioncontext import AsyncAction, report_error
+from rpython.rlib import jit, rgc
from rpython.rlib.rshrinklist import AbstractShrinkList
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rweakref import dead_ref
@@ -16,9 +17,12 @@
class WeakrefLifeline(W_Root):
+ typedef = None
+
cached_weakref = None
cached_proxy = None
other_refs_weak = None
+ has_callbacks = False
def __init__(self, space):
self.space = space
@@ -99,31 +103,10 @@
return w_ref
return space.w_None
-
-class WeakrefLifelineWithCallbacks(WeakrefLifeline):
-
- def __init__(self, space, oldlifeline=None):
- self.space = space
- if oldlifeline is not None:
- self.cached_weakref = oldlifeline.cached_weakref
- self.cached_proxy = oldlifeline.cached_proxy
- self.other_refs_weak = oldlifeline.other_refs_weak
-
- def __del__(self):
- """This runs when the interp-level object goes away, and allows
- its lifeline to go away. The purpose of this is to activate the
- callbacks even if there is no __del__ method on the interp-level
- W_Root subclass implementing the object.
- """
- if self.other_refs_weak is None:
- return
- items = self.other_refs_weak.items()
- for i in range(len(items)-1, -1, -1):
- w_ref = items[i]()
- if w_ref is not None and w_ref.w_callable is not None:
- w_ref.enqueue_for_destruction(self.space,
- W_WeakrefBase.activate_callback,
- 'weakref callback of ')
+ def enable_callbacks(self):
+ if not self.has_callbacks:
+ self.space.finalizer_queue.register_finalizer(self)
+ self.has_callbacks = True
@jit.dont_look_inside
def make_weakref_with_callback(self, w_subtype, w_obj, w_callable):
@@ -131,6 +114,7 @@
w_ref = space.allocate_instance(W_Weakref, w_subtype)
W_Weakref.__init__(w_ref, space, w_obj, w_callable)
self.append_wref_to(w_ref)
+ self.enable_callbacks()
return w_ref
@jit.dont_look_inside
@@ -141,8 +125,33 @@
else:
w_proxy = W_Proxy(space, w_obj, w_callable)
self.append_wref_to(w_proxy)
+ self.enable_callbacks()
return w_proxy
+ def _finalize_(self):
+ """This is called at the end, if enable_callbacks() was invoked.
+ It activates the callbacks.
+ """
+ if self.other_refs_weak is None:
+ return
+ #
+ # If this is set, then we're in the 'gc.disable()' mode. In that
+ # case, don't invoke the callbacks now.
+ if self.space.user_del_action.gc_disabled(self):
+ return
+ #
+ items = self.other_refs_weak.items()
+ self.other_refs_weak = None
+ for i in range(len(items)-1, -1, -1):
+ w_ref = items[i]()
+ if w_ref is not None and w_ref.w_callable is not None:
+ try:
+ w_ref.activate_callback()
+ except Exception as e:
+ report_error(self.space, e,
+ "weakref callback ", w_ref.w_callable)
+
+
# ____________________________________________________________
@@ -163,7 +172,6 @@
self.w_obj_weak = dead_ref
def activate_callback(w_self):
- assert isinstance(w_self, W_WeakrefBase)
w_self.space.call_function(w_self.w_callable, w_self)
def descr__repr__(self, space):
@@ -227,32 +235,16 @@
w_obj.setweakref(space, lifeline)
return lifeline
-def getlifelinewithcallbacks(space, w_obj):
- lifeline = w_obj.getweakref()
- if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None
- oldlifeline = lifeline
- lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline)
- w_obj.setweakref(space, lifeline)
- return lifeline
-
-
-def get_or_make_weakref(space, w_subtype, w_obj):
- return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj)
-
-
-def make_weakref_with_callback(space, w_subtype, w_obj, w_callable):
- lifeline = getlifelinewithcallbacks(space, w_obj)
- return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable)
-
def descr__new__weakref(space, w_subtype, w_obj, w_callable=None,
__args__=None):
if __args__.arguments_w:
raise oefmt(space.w_TypeError, "__new__ expected at most 2 arguments")
+ lifeline = getlifeline(space, w_obj)
if space.is_none(w_callable):
- return get_or_make_weakref(space, w_subtype, w_obj)
+ return lifeline.get_or_make_weakref(w_subtype, w_obj)
else:
- return make_weakref_with_callback(space, w_subtype, w_obj, w_callable)
+ return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable)
W_Weakref.typedef = TypeDef("weakref",
__doc__ = """A weak reference to an object 'obj'. A 'callback' can be given,
@@ -308,23 +300,15 @@
return space.call_args(w_obj, __args__)
-def get_or_make_proxy(space, w_obj):
- return getlifeline(space, w_obj).get_or_make_proxy(w_obj)
-
-
-def make_proxy_with_callback(space, w_obj, w_callable):
- lifeline = getlifelinewithcallbacks(space, w_obj)
- return lifeline.make_proxy_with_callback(w_obj, w_callable)
-
-
def proxy(space, w_obj, w_callable=None):
"""Create a proxy object that weakly references 'obj'.
'callback', if given, is called with the proxy as an argument when 'obj'
is about to be finalized."""
+ lifeline = getlifeline(space, w_obj)
if space.is_none(w_callable):
- return get_or_make_proxy(space, w_obj)
+ return lifeline.get_or_make_proxy(w_obj)
else:
- return make_proxy_with_callback(space, w_obj, w_callable)
+ return lifeline.make_proxy_with_callback(w_obj, w_callable)
def descr__new__proxy(space, w_subtype, w_obj, w_callable=None):
raise oefmt(space.w_TypeError, "cannot create 'weakproxy' instances")
@@ -345,7 +329,7 @@
proxy_typedef_dict = {}
callable_proxy_typedef_dict = {}
-special_ops = {'repr': True, 'userdel': True, 'hash': True}
+special_ops = {'repr': True, 'hash': True}
for opname, _, arity, special_methods in ObjSpace.MethodTable:
if opname in special_ops or not special_methods:
diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py
--- a/pypy/module/_weakref/test/test_weakref.py
+++ b/pypy/module/_weakref/test/test_weakref.py
@@ -1,6 +1,9 @@
class AppTestWeakref(object):
spaceconfig = dict(usemodules=('_weakref',))
-
+
+ def setup_class(cls):
+ cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
+
def test_simple(self):
import _weakref, gc
class A(object):
@@ -287,6 +290,9 @@
assert a1 is None
def test_del_and_callback_and_id(self):
+ if not self.runappdirect:
+ skip("the id() doesn't work correctly in __del__ and "
+ "callbacks before translation")
import gc, weakref
seen_del = []
class A(object):
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -518,8 +518,14 @@
def __init__(self, space, compresslevel):
self.space = space
self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True)
- self.running = False
- self._init_bz2comp(compresslevel)
+ try:
+ self.running = False
+ self._init_bz2comp(compresslevel)
+ except:
+ lltype.free(self.bzs, flavor='raw')
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ raise
+ self.register_finalizer(space)
def _init_bz2comp(self, compresslevel):
if compresslevel < 1 or compresslevel > 9:
@@ -532,9 +538,12 @@
self.running = True
- def __del__(self):
- BZ2_bzCompressEnd(self.bzs)
- lltype.free(self.bzs, flavor='raw')
+ def _finalize_(self):
+ bzs = self.bzs
+ if bzs:
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ BZ2_bzCompressEnd(bzs)
+ lltype.free(bzs, flavor='raw')
@unwrap_spec(data='bufferstr')
def compress(self, data):
@@ -621,10 +630,16 @@
self.space = space
self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True)
- self.running = False
- self.unused_data = ""
+ try:
+ self.running = False
+ self.unused_data = ""
- self._init_bz2decomp()
+ self._init_bz2decomp()
+ except:
+ lltype.free(self.bzs, flavor='raw')
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ raise
+ self.register_finalizer(space)
def _init_bz2decomp(self):
bzerror = BZ2_bzDecompressInit(self.bzs, 0, 0)
@@ -633,9 +648,12 @@
self.running = True
- def __del__(self):
- BZ2_bzDecompressEnd(self.bzs)
- lltype.free(self.bzs, flavor='raw')
+ def _finalize_(self):
+ bzs = self.bzs
+ if bzs:
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ BZ2_bzDecompressEnd(bzs)
+ lltype.free(bzs, flavor='raw')
@unwrap_spec(data='bufferstr')
def decompress(self, data):
diff --git a/pypy/module/bz2/test/support.py b/pypy/module/bz2/test/support.py
--- a/pypy/module/bz2/test/support.py
+++ b/pypy/module/bz2/test/support.py
@@ -10,5 +10,6 @@
#
while tries and ll2ctypes.ALLOCATED:
gc.collect() # to make sure we disallocate buffers
+ self.space.getexecutioncontext()._run_finalizers_now()
tries -= 1
assert not ll2ctypes.ALLOCATED
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -1020,9 +1020,12 @@
class W_CPPInstance(W_Root):
- _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns']
+ _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns',
+ 'finalizer_registered']
_immutable_fields_ = ["cppclass", "isref"]
+ finalizer_registered = False
+
def __init__(self, space, cppclass, rawobject, isref, python_owns):
self.space = space
self.cppclass = cppclass
@@ -1032,6 +1035,12 @@
assert not isref or not python_owns
self.isref = isref
self.python_owns = python_owns
+ self._opt_register_finalizer()
+
+ def _opt_register_finalizer(self):
+ if self.python_owns and not self.finalizer_registered:
+ self.register_finalizer(self.space)
+ self.finalizer_registered = True
def _nullcheck(self):
if not self._rawobject or (self.isref and not self.get_rawobject()):
@@ -1045,6 +1054,7 @@
@unwrap_spec(value=bool)
def fset_python_owns(self, space, value):
self.python_owns = space.is_true(value)
+ self._opt_register_finalizer()
def get_cppthis(self, calling_scope):
return self.cppclass.get_cppthis(self, calling_scope)
@@ -1143,16 +1153,14 @@
(self.cppclass.name, rffi.cast(rffi.ULONG, self.get_rawobject())))
def destruct(self):
- assert isinstance(self, W_CPPInstance)
if self._rawobject and not self.isref:
memory_regulator.unregister(self)
capi.c_destruct(self.space, self.cppclass, self._rawobject)
self._rawobject = capi.C_NULL_OBJECT
- def __del__(self):
+ def _finalize_(self):
if self.python_owns:
- self.enqueue_for_destruction(self.space, W_CPPInstance.destruct,
- '__del__() method of ')
+ self.destruct()
W_CPPInstance.typedef = TypeDef(
'CPPInstance',
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -374,7 +374,75 @@
header = pypy_decl
if mangle_name('', typedef.name) is None:
header = None
- if name == 'tp_setattro':
+ handled = False
+ # unary functions
+ for tp_name, attr in [('tp_as_number.c_nb_int', '__int__'),
+ ('tp_as_number.c_nb_long', '__long__'),
+ ('tp_as_number.c_nb_float', '__float__'),
+ ('tp_as_number.c_nb_negative', '__neg__'),
+ ('tp_as_number.c_nb_positive', '__pos__'),
+ ('tp_as_number.c_nb_absolute', '__abs__'),
+ ('tp_as_number.c_nb_invert', '__invert__'),
+ ('tp_as_number.c_nb_index', '__index__'),
+ ('tp_str', '__str__'),
+ ('tp_repr', '__repr__'),
+ ('tp_iter', '__iter__'),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self):
+ return space.call_function(slot_fn, w_self)
+ api_func = slot_func.api_func
+ handled = True
+
+ # binary functions
+ for tp_name, attr in [('tp_as_number.c_nb_add', '__add__'),
+ ('tp_as_number.c_nb_subtract', '__subtract__'),
+ ('tp_as_number.c_nb_multiply', '__mul__'),
+ ('tp_as_number.c_nb_divide', '__div__'),
+ ('tp_as_number.c_nb_remainder', '__mod__'),
+ ('tp_as_number.c_nb_divmod', '__divmod__'),
+ ('tp_as_number.c_nb_lshift', '__lshift__'),
+ ('tp_as_number.c_nb_rshift', '__rshift__'),
+ ('tp_as_number.c_nb_and', '__and__'),
+ ('tp_as_number.c_nb_xor', '__xor__'),
+ ('tp_as_number.c_nb_or', '__or__'),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject, PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self, w_arg):
+ return space.call_function(slot_fn, w_self, w_arg)
+ api_func = slot_func.api_func
+ handled = True
+
+ # ternary functions
+ for tp_name, attr in [('tp_as_number.c_nb_power', ''),
+ ]:
+ if name == tp_name:
+ slot_fn = w_type.getdictvalue(space, attr)
+ if slot_fn is None:
+ return
+
+ @cpython_api([PyObject, PyObject, PyObject], PyObject, header=header)
+ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
+ def slot_func(space, w_self, w_arg1, w_arg2):
+ return space.call_function(slot_fn, w_self, w_arg1, w_arg2)
+ api_func = slot_func.api_func
+ handled = True
+
+ if handled:
+ pass
+ elif name == 'tp_setattro':
setattr_fn = w_type.getdictvalue(space, '__setattr__')
delattr_fn = w_type.getdictvalue(space, '__delattr__')
if setattr_fn is None:
@@ -401,28 +469,6 @@
return space.call_function(getattr_fn, w_self, w_name)
api_func = slot_tp_getattro.api_func
- elif name == 'tp_as_number.c_nb_int':
- int_fn = w_type.getdictvalue(space, '__int__')
- if int_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_nb_int(space, w_self):
- return space.call_function(int_fn, w_self)
- api_func = slot_nb_int.api_func
-
- elif name == 'tp_as_number.c_nb_float':
- float_fn = w_type.getdictvalue(space, '__float__')
- if float_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_nb_float(space, w_self):
- return space.call_function(float_fn, w_self)
- api_func = slot_nb_float.api_func
-
elif name == 'tp_call':
call_fn = w_type.getdictvalue(space, '__call__')
if call_fn is None:
@@ -436,28 +482,6 @@
return space.call_args(call_fn, args)
api_func = slot_tp_call.api_func
- elif name == 'tp_str':
- str_fn = w_type.getdictvalue(space, '__str__')
- if str_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_tp_str(space, w_self):
- return space.call_function(str_fn, w_self)
- api_func = slot_tp_str.api_func
-
- elif name == 'tp_iter':
- iter_fn = w_type.getdictvalue(space, '__iter__')
- if iter_fn is None:
- return
-
- @cpython_api([PyObject], PyObject, header=header)
- @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
- def slot_tp_iter(space, w_self):
- return space.call_function(iter_fn, w_self)
- api_func = slot_tp_iter.api_func
-
elif name == 'tp_iternext':
iternext_fn = w_type.getdictvalue(space, 'next')
if iternext_fn is None:
@@ -501,6 +525,7 @@
return space.call_args(space.get(new_fn, w_self), args)
api_func = slot_tp_new.api_func
else:
+ # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce
return
return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))
diff --git a/pypy/module/cpyext/src/abstract.c b/pypy/module/cpyext/src/abstract.c
--- a/pypy/module/cpyext/src/abstract.c
+++ b/pypy/module/cpyext/src/abstract.c
@@ -326,3 +326,9 @@
return tmp;
}
+/* for binary compatibility with 5.1 */
+PyAPI_FUNC(void) PyPyObject_Del(PyObject *);
+void PyPyObject_Del(PyObject *op)
+{
+ PyObject_FREE(op);
+}
diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py
--- a/pypy/module/cpyext/test/test_api.py
+++ b/pypy/module/cpyext/test/test_api.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.baseobjspace import W_Root
from pypy.module.cpyext.state import State
@@ -100,7 +100,8 @@
PyPy_TypedefTest2(space, ppos)
lltype.free(ppos, flavor='raw')
-
+@pytest.mark.skipif(os.environ.get('USER')=='root',
+ reason='root can write to all files')
def test_copy_header_files(tmpdir):
api.copy_header_files(tmpdir, True)
def check(name):
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -40,7 +40,7 @@
#endif
if(s->ob_type->tp_basicsize != expected_size)
{
- printf("tp_basicsize==%ld\\n", s->ob_type->tp_basicsize);
+ printf("tp_basicsize==%zd\\n", s->ob_type->tp_basicsize);
result = 0;
}
Py_DECREF(s);
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -921,3 +921,105 @@
' multiple bases have instance lay-out conflict')
else:
raise AssertionError("did not get TypeError!")
+
+ def test_call_tp_dealloc_when_created_from_python(self):
+ module = self.import_extension('foo', [
+ ("fetchFooType", "METH_VARARGS",
+ """
+ PyObject *o;
+ Foo_Type.tp_basicsize = sizeof(FooObject);
+ Foo_Type.tp_dealloc = &dealloc_foo;
+ Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES
+ | Py_TPFLAGS_BASETYPE;
+ Foo_Type.tp_new = &new_foo;
+ Foo_Type.tp_free = &PyObject_Del;
+ if (PyType_Ready(&Foo_Type) < 0) return NULL;
+
+ o = PyObject_New(PyObject, &Foo_Type);
+ init_foo(o);
+ Py_DECREF(o); /* calls dealloc_foo immediately */
+
+ Py_INCREF(&Foo_Type);
+ return (PyObject *)&Foo_Type;
+ """),
+ ("newInstance", "METH_O",
+ """
+ PyTypeObject *tp = (PyTypeObject *)args;
+ PyObject *e = PyTuple_New(0);
+ PyObject *o = tp->tp_new(tp, e, NULL);
From pypy.commits at gmail.com Mon May 9 16:02:23 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Mon, 09 May 2016 13:02:23 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-macros-cast: Remove trailing semicolons
on macros (yikes).
Message-ID: <5730eccf.a82cc20a.62e83.ffffe06c@mx.google.com>
Author: Devin Jeanpierre
Branch: cpyext-macros-cast
Changeset: r84340:cbcd8db8ebf5
Date: 2016-05-09 13:01 -0700
http://bitbucket.org/pypy/pypy/changeset/cbcd8db8ebf5/
Log: Remove trailing semicolons on macros (yikes).
diff --git a/pypy/module/cpyext/include/intobject.h b/pypy/module/cpyext/include/intobject.h
--- a/pypy/module/cpyext/include/intobject.h
+++ b/pypy/module/cpyext/include/intobject.h
@@ -7,7 +7,7 @@
extern "C" {
#endif
-#define PyInt_AS_LONG(obj) _PyInt_AS_LONG((PyObject*)obj);
+#define PyInt_AS_LONG(obj) _PyInt_AS_LONG((PyObject*)obj)
typedef struct {
PyObject_HEAD
diff --git a/pypy/module/cpyext/include/setobject.h b/pypy/module/cpyext/include/setobject.h
--- a/pypy/module/cpyext/include/setobject.h
+++ b/pypy/module/cpyext/include/setobject.h
@@ -6,7 +6,7 @@
extern "C" {
#endif
-#define PySet_GET_SIZE(obj) _PySet_GET_SIZE((PyObject*)obj);
+#define PySet_GET_SIZE(obj) _PySet_GET_SIZE((PyObject*)obj)
#ifdef __cplusplus
}
From pypy.commits at gmail.com Mon May 9 18:04:10 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 09 May 2016 15:04:10 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: adapt to py3k
Message-ID: <5731095a.882cc20a.2b74b.571e@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84341:bc87e9e3fa04
Date: 2016-05-09 15:03 -0700
http://bitbucket.org/pypy/pypy/changeset/bc87e9e3fa04/
Log: adapt to py3k
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -104,7 +104,7 @@
all_constants = []
p = lib.my_rlimit_consts
while p.name:
- name = ffi.string(p.name)
+ name = ffi.string(p.name).decode()
globals()[name] = int(p.value)
all_constants.append(name)
p += 1
From pypy.commits at gmail.com Mon May 9 21:56:42 2016
From: pypy.commits at gmail.com (pjenvey)
Date: Mon, 09 May 2016 18:56:42 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: reapply xfails from default
Message-ID: <57313fda.8a37c20a.4d8f2.4479@mx.google.com>
Author: Philip Jenvey
Branch: py3k
Changeset: r84342:c871cbd337d4
Date: 2016-05-09 18:54 -0700
http://bitbucket.org/pypy/pypy/changeset/c871cbd337d4/
Log: reapply xfails from default
diff --git a/lib-python/3/ctypes/test/test_python_api.py b/lib-python/3/ctypes/test/test_python_api.py
--- a/lib-python/3/ctypes/test/test_python_api.py
+++ b/lib-python/3/ctypes/test/test_python_api.py
@@ -19,6 +19,7 @@
class PythonAPITestCase(unittest.TestCase):
+ @xfail
def test_PyBytes_FromStringAndSize(self):
PyBytes_FromStringAndSize = pythonapi.PyBytes_FromStringAndSize
@@ -71,6 +72,7 @@
del pyobj
self.assertEqual(grc(s), ref)
+ @xfail
def test_PyOS_snprintf(self):
PyOS_snprintf = pythonapi.PyOS_snprintf
PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p
@@ -85,6 +87,7 @@
# not enough arguments
self.assertRaises(TypeError, PyOS_snprintf, buf)
+ @xfail
def test_pyobject_repr(self):
self.assertEqual(repr(py_object()), "py_object()")
self.assertEqual(repr(py_object(42)), "py_object(42)")
From pypy.commits at gmail.com Tue May 10 03:33:18 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 00:33:18 -0700 (PDT)
Subject: [pypy-commit] pypy default: Re-add debug_rotate_nursery() in case
we're running in PYPY_GC_DEBUG and
Message-ID: <57318ebe.161b1c0a.70e6d.6877@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84343:226dcd726437
Date: 2016-05-10 09:24 +0200
http://bitbucket.org/pypy/pypy/changeset/226dcd726437/
Log: Re-add debug_rotate_nursery() in case we're running in PYPY_GC_DEBUG
and don't have any pinned object.
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -568,14 +568,14 @@
# set up extra stuff for PYPY_GC_DEBUG.
MovingGCBase.post_setup(self)
if self.DEBUG and llarena.has_protect:
- # gc debug mode: allocate 23 nurseries instead of just 1,
+ # gc debug mode: allocate 7 nurseries instead of just 1,
# and use them alternatively, while mprotect()ing the unused
# ones to detect invalid access.
debug_start("gc-debug")
self.debug_rotating_nurseries = lltype.malloc(
- NURSARRAY, 22, flavor='raw', track_allocation=False)
+ NURSARRAY, 6, flavor='raw', track_allocation=False)
i = 0
- while i < 22:
+ while i < 6:
nurs = self._alloc_nursery()
llarena.arena_protect(nurs, self._nursery_memory_size(), True)
self.debug_rotating_nurseries[i] = nurs
@@ -1731,7 +1731,6 @@
llarena.arena_reset(prev, pinned_obj_size, 3)
else:
llarena.arena_reset(prev, pinned_obj_size, 0)
- # XXX: debug_rotate_nursery missing here
#
# clean up object's flags
obj = cur + size_gc_header
@@ -1747,6 +1746,8 @@
# reset everything after the last pinned object till the end of the arena
if self.gc_nursery_debug:
llarena.arena_reset(prev, self.nursery + self.nursery_size - prev, 3)
+ if not nursery_barriers.non_empty(): # no pinned objects
+ self.debug_rotate_nursery()
else:
llarena.arena_reset(prev, self.nursery + self.nursery_size - prev, 0)
#
@@ -1756,7 +1757,6 @@
self.nursery_barriers = nursery_barriers
self.surviving_pinned_objects.delete()
#
- # XXX gc-minimark-pinning does a debug_rotate_nursery() here (groggi)
self.nursery_free = self.nursery
self.nursery_top = self.nursery_barriers.popleft()
#
From pypy.commits at gmail.com Tue May 10 03:33:20 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 00:33:20 -0700 (PDT)
Subject: [pypy-commit] pypy default: Fix for 392dd419f5d0
Message-ID: <57318ec0.952f1c0a.258d0.7626@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84344:4a98b6f0536f
Date: 2016-05-10 09:33 +0200
http://bitbucket.org/pypy/pypy/changeset/4a98b6f0536f/
Log: Fix for 392dd419f5d0
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -1521,7 +1521,7 @@
# Instantiated in cpyext/ndarrayobject. It is here since ufunc calls
# set_dims_and_steps, otherwise ufunc, ndarrayobject would have circular
# imports
-npy_intpp = rffi.INTPTR_T
+npy_intpp = rffi.INTPTR_TP # "intptr_t *"
LONG_SIZE = LONG_BIT / 8
CCHARP_SIZE = _get_bitsize('P') / 8
diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py
--- a/rpython/rtyper/lltypesystem/rffi.py
+++ b/rpython/rtyper/lltypesystem/rffi.py
@@ -475,7 +475,7 @@
TYPES += ['signed char', 'unsigned char',
'long long', 'unsigned long long',
'size_t', 'time_t', 'wchar_t',
- 'uintptr_t', 'intptr_t',
+ 'uintptr_t', 'intptr_t', # C note: these two are _integer_ types
'void*'] # generic pointer type
# This is a bit of a hack since we can't use rffi_platform here.
From pypy.commits at gmail.com Tue May 10 03:50:37 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 00:50:37 -0700 (PDT)
Subject: [pypy-commit] pypy default: Use the correct type in micronumpy. Fix
the types expected in ndarrayobject.
Message-ID: <573192cd.a16ec20a.6dd2d.4adf@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84345:10c47aaadaba
Date: 2016-05-10 09:50 +0200
http://bitbucket.org/pypy/pypy/changeset/10c47aaadaba/
Log: Use the correct type in micronumpy. Fix the types expected in
ndarrayobject.
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -26,6 +26,8 @@
ARRAY_CARRAY = ARRAY_C_CONTIGUOUS | ARRAY_BEHAVED
ARRAY_DEFAULT = ARRAY_CARRAY
+npy_intpp = rffi.CArrayPtr(Py_ssize_t)
+
HEADER = 'pypy_numpy.h'
@cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL, header=HEADER)
@@ -196,15 +198,15 @@
order=order, owning=owning, w_subtype=w_subtype)
- at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject, header=HEADER)
+ at cpython_api([Py_ssize_t, npy_intpp, Py_ssize_t], PyObject, header=HEADER)
def _PyArray_SimpleNew(space, nd, dims, typenum):
return simple_new(space, nd, dims, typenum)
- at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject, header=HEADER)
+ at cpython_api([Py_ssize_t, npy_intpp, Py_ssize_t, rffi.VOIDP], PyObject, header=HEADER)
def _PyArray_SimpleNewFromData(space, nd, dims, typenum, data):
return simple_new_from_data(space, nd, dims, typenum, data, owning=False)
- at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject, header=HEADER)
+ at cpython_api([Py_ssize_t, npy_intpp, Py_ssize_t, rffi.VOIDP], PyObject, header=HEADER)
def _PyArray_SimpleNewFromDataOwning(space, nd, dims, typenum, data):
# Variant to take over ownership of the memory, equivalent to:
# PyObject *arr = PyArray_SimpleNewFromData(nd, dims, typenum, data);
@@ -212,7 +214,7 @@
return simple_new_from_data(space, nd, dims, typenum, data, owning=True)
- at cpython_api([rffi.VOIDP, Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.LONGP,
+ at cpython_api([rffi.VOIDP, Py_ssize_t, npy_intpp, Py_ssize_t, npy_intpp,
rffi.VOIDP, Py_ssize_t, Py_ssize_t, PyObject], PyObject, header=HEADER)
def _PyArray_New(space, subtype, nd, dims, typenum, strides, data, itemsize, flags, obj):
if strides:
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -1521,7 +1521,8 @@
# Instantiated in cpyext/ndarrayobject. It is here since ufunc calls
# set_dims_and_steps, otherwise ufunc, ndarrayobject would have circular
# imports
-npy_intpp = rffi.INTPTR_TP # "intptr_t *"
+Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t')
+npy_intpp = rffi.CArrayPtr(Py_ssize_t)
LONG_SIZE = LONG_BIT / 8
CCHARP_SIZE = _get_bitsize('P') / 8
From pypy.commits at gmail.com Tue May 10 05:19:14 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 02:19:14 -0700 (PDT)
Subject: [pypy-commit] pypy default: Add a passing test
Message-ID: <5731a792.aaf0c20a.374fa.7337@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84346:927199115c54
Date: 2016-05-10 10:21 +0200
http://bitbucket.org/pypy/pypy/changeset/927199115c54/
Log: Add a passing test
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -1023,3 +1023,30 @@
break
self.debug_collect()
assert module.getCounter() == 7070
+
+ def test_tp_call_reverse(self):
+ module = self.import_extension('foo', [
+ ("new_obj", "METH_NOARGS",
+ '''
+ PyObject *obj;
+ Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT;
+ Foo_Type.tp_call = &my_tp_call;
+ if (PyType_Ready(&Foo_Type) < 0) return NULL;
+ obj = PyObject_New(PyObject, &Foo_Type);
+ return obj;
+ '''
+ )],
+ '''
+ static PyObject *
+ my_tp_call(PyObject *self, PyObject *args, PyObject *kwds)
+ {
+ return PyInt_FromLong(42);
+ }
+ static PyTypeObject Foo_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "foo.foo",
+ };
+ ''')
+ x = module.new_obj()
+ assert x() == 42
+ assert x(4, bar=5) == 42
From pypy.commits at gmail.com Tue May 10 05:19:16 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 02:19:16 -0700 (PDT)
Subject: [pypy-commit] pypy default: Metaclass support: revert a change done
in e6d78e83ee3c that would not
Message-ID: <5731a794.06921c0a.1e1d5.ffffb56f@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84347:415b6c689836
Date: 2016-05-10 11:19 +0200
http://bitbucket.org/pypy/pypy/changeset/415b6c689836/
Log: Metaclass support: revert a change done in e6d78e83ee3c that would
not set is_cpytype() on cpyext subtypes of type. Unsure why it was
needed at that point in time, but it doesn't appear to be now, and
it gets massively in the way, because it confuses
pypy.tool.ann_override.
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -1,3 +1,4 @@
+from pypy.interpreter import gateway
from rpython.rtyper.lltypesystem import rffi
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.test.test_api import BaseApiTest
@@ -391,6 +392,14 @@
api.Py_DecRef(ref)
class AppTestSlots(AppTestCpythonExtensionBase):
+ def setup_class(cls):
+ AppTestCpythonExtensionBase.setup_class.im_func(cls)
+ def _check_type_object(w_X):
+ assert w_X.is_cpytype()
+ assert not w_X.is_heaptype()
+ cls.w__check_type_object = cls.space.wrap(
+ gateway.interp2app(_check_type_object))
+
def test_some_slots(self):
module = self.import_extension('foo', [
("test_type", "METH_O",
@@ -1050,3 +1059,29 @@
x = module.new_obj()
assert x() == 42
assert x(4, bar=5) == 42
+
+ def test_custom_metaclass(self):
+ module = self.import_extension('foo', [
+ ("getMetaClass", "METH_NOARGS",
+ '''
+ PyObject *obj;
+ FooType_Type.tp_flags = Py_TPFLAGS_DEFAULT;
+ FooType_Type.tp_base = &PyType_Type;
+ if (PyType_Ready(&FooType_Type) < 0) return NULL;
+ Py_INCREF(&FooType_Type);
+ return (PyObject *)&FooType_Type;
+ '''
+ )],
+ '''
+ static PyTypeObject FooType_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "foo.Type",
+ };
+ ''')
+ FooType = module.getMetaClass()
+ if not self.runappdirect:
+ self._check_type_object(FooType)
+ class X(object):
+ __metaclass__ = FooType
+ print repr(X)
+ X()
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -405,8 +405,7 @@
W_TypeObject.__init__(self, space, name,
bases_w or [space.w_object], dict_w, force_new_layout=new_layout)
- if not space.is_true(space.issubtype(self, space.w_type)):
- self.flag_cpytype = True
+ self.flag_cpytype = True
self.flag_heaptype = False
# if a sequence or a mapping, then set the flag to force it
if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item:
From pypy.commits at gmail.com Tue May 10 05:39:00 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 02:39:00 -0700 (PDT)
Subject: [pypy-commit] cffi default: Expand the error message
Message-ID: <5731ac34.26b0c20a.c9083.79b4@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2696:324549d18676
Date: 2016-05-10 11:39 +0200
http://bitbucket.org/cffi/cffi/changeset/324549d18676/
Log: Expand the error message
diff --git a/cffi/commontypes.py b/cffi/commontypes.py
--- a/cffi/commontypes.py
+++ b/cffi/commontypes.py
@@ -35,8 +35,11 @@
"you call ffi.set_unicode()" % (commontype,))
else:
if commontype == cdecl:
- raise api.FFIError("Unsupported type: %r. Please file a bug "
- "if you think it should be." % (commontype,))
+ raise api.FFIError(
+ "Unsupported type: %r. Please look at "
+ "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations "
+ "and file an issue if you think this type should really "
+ "be supported." % (commontype,))
result, quals = parser.parse_type_and_quals(cdecl) # recursive
assert isinstance(result, model.BaseTypeByIdentity)
From pypy.commits at gmail.com Tue May 10 05:53:21 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 10 May 2016 02:53:21 -0700 (PDT)
Subject: [pypy-commit] pypy new-jit-log: merged default
Message-ID: <5731af91.a16ec20a.6dd2d.7e86@mx.google.com>
Author: Richard Plangger
Branch: new-jit-log
Changeset: r84348:cfecd970a924
Date: 2016-05-09 13:29 +0200
http://bitbucket.org/pypy/pypy/changeset/cfecd970a924/
Log: merged default
diff too long, truncating to 2000 out of 2152 lines
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -33,26 +33,25 @@
it from a finalizer. A finalizer runs earlier, and in topological
order; care must be taken that the object might still be reachable at
this point if we're clever enough. A destructor on the other hand runs
-last; nothing can be done with the object any more.
+last; nothing can be done with the object any more, and the GC frees it
+immediately.
Destructors
-----------
A destructor is an RPython ``__del__()`` method that is called directly
-by the GC when there is no more reference to an object. Intended for
-objects that just need to free a block of raw memory or close a file.
+by the GC when it is about to free the memory. Intended for objects
+that just need to free an extra block of raw memory.
There are restrictions on the kind of code you can put in ``__del__()``,
including all other functions called by it. These restrictions are
-checked. In particular you cannot access fields containing GC objects;
-and if you call an external C function, it must be a "safe" function
-(e.g. not releasing the GIL; use ``releasegil=False`` in
-``rffi.llexternal()``).
+checked. In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
-If there are several objects with destructors that die during the same
-GC cycle, they are called in a completely random order --- but that
-should not matter because destructors cannot do much anyway.
+Destructors are called precisely when the GC frees the memory of the
+object. As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
Register_finalizer
@@ -95,10 +94,15 @@
To find the queued items, call ``fin.next_dead()`` repeatedly. It
returns the next queued item, or ``None`` when the queue is empty.
-It is allowed in theory to cumulate several different
+In theory, it would kind of work if you cumulate several different
``FinalizerQueue`` instances for objects of the same class, and
(always in theory) the same ``obj`` could be registered several times
in the same queue, or in several queues. This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
+
+Note that the Boehm garbage collector, used in ``rpython -O0``,
+completely ignores ``register_finalizer()``.
Ordering of finalizers
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -84,3 +84,8 @@
.. branch: cpyext-more-slots
+.. branch: use-gc-del-3
+
+Use the new rgc.FinalizerQueue mechanism to clean up the handling of
+``__del__`` methods. Fixes notably issue #2287. (All RPython
+subclasses of W_Root need to use FinalizerQueue now.)
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -11,7 +11,7 @@
INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
- UserDelAction)
+ make_finalizer_queue)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
@@ -28,6 +28,7 @@
"""This is the abstract root class of all wrapped objects that live
in a 'normal' object space like StdObjSpace."""
__slots__ = ('__weakref__',)
+ _must_be_light_finalizer_ = True
user_overridden_class = False
def getdict(self, space):
@@ -136,9 +137,8 @@
pass
def clear_all_weakrefs(self):
- """Call this at the beginning of interp-level __del__() methods
- in subclasses. It ensures that weakrefs (if any) are cleared
- before the object is further destroyed.
+ """Ensures that weakrefs (if any) are cleared now. This is
+ called by UserDelAction before the object is finalized further.
"""
lifeline = self.getweakref()
if lifeline is not None:
@@ -151,25 +151,37 @@
self.delweakref()
lifeline.clear_all_weakrefs()
- __already_enqueued_for_destruction = ()
+ def _finalize_(self):
+ """The RPython-level finalizer.
- def enqueue_for_destruction(self, space, callback, descrname):
- """Put the object in the destructor queue of the space.
- At a later, safe point in time, UserDelAction will call
- callback(self). If that raises OperationError, prints it
- to stderr with the descrname string.
+ By default, it is *not called*. See self.register_finalizer().
+ Be ready to handle the case where the object is only half
+ initialized. Also, in some cases the object might still be
+ visible to app-level after _finalize_() is called (e.g. if
+ there is a __del__ that resurrects).
+ """
- Note that 'callback' will usually need to start with:
- assert isinstance(self, W_SpecificClass)
+ def register_finalizer(self, space):
+ """Register a finalizer for this object, so that
+ self._finalize_() will be called. You must call this method at
+ most once. Be ready to handle in _finalize_() the case where
+ the object is half-initialized, even if you only call
+ self.register_finalizer() at the end of the initialization.
+ This is because there are cases where the finalizer is already
+ registered before: if the user makes an app-level subclass with
+ a __del__. (In that case only, self.register_finalizer() does
+ nothing, because the finalizer is already registered in
+ allocate_instance().)
"""
- # this function always resurect the object, so when
- # running on top of CPython we must manually ensure that
- # we enqueue it only once
- if not we_are_translated():
- if callback in self.__already_enqueued_for_destruction:
- return
- self.__already_enqueued_for_destruction += (callback,)
- space.user_del_action.register_callback(self, callback, descrname)
+ if self.user_overridden_class and self.getclass(space).hasuserdel:
+ # already registered by space.allocate_instance()
+ if not we_are_translated():
+ assert space.finalizer_queue._already_registered(self)
+ else:
+ if not we_are_translated():
+ # does not make sense if _finalize_ is not overridden
+ assert self._finalize_.im_func is not W_Root._finalize_.im_func
+ space.finalizer_queue.register_finalizer(self)
# hooks that the mapdict implementations needs:
def _get_mapdict_map(self):
@@ -389,9 +401,9 @@
self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
- self.user_del_action = UserDelAction(self)
+ make_finalizer_queue(W_Root, self)
self._code_of_sys_exc_info = None
-
+
# can be overridden to a subclass
self.initialize()
@@ -1844,7 +1856,6 @@
('get', 'get', 3, ['__get__']),
('set', 'set', 3, ['__set__']),
('delete', 'delete', 2, ['__delete__']),
- ('userdel', 'del', 1, ['__del__']),
]
ObjSpace.BuiltinModuleTable = [
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -2,7 +2,7 @@
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import specialize
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
TICK_COUNTER_STEP = 100
@@ -141,6 +141,12 @@
actionflag.action_dispatcher(self, frame) # slow path
bytecode_trace._always_inline_ = True
+ def _run_finalizers_now(self):
+ # Tests only: run the actions now, to ensure that the
+ # finalizable objects are really finalized. Used notably by
+ # pypy.tool.pytest.apptest.
+ self.space.actionflag.action_dispatcher(self, None)
+
def bytecode_only_trace(self, frame):
"""
Like bytecode_trace() but doesn't invoke any other events besides the
@@ -515,75 +521,98 @@
"""
-class UserDelCallback(object):
- def __init__(self, w_obj, callback, descrname):
- self.w_obj = w_obj
- self.callback = callback
- self.descrname = descrname
- self.next = None
-
class UserDelAction(AsyncAction):
"""An action that invokes all pending app-level __del__() method.
This is done as an action instead of immediately when the
- interp-level __del__() is invoked, because the latter can occur more
+ WRootFinalizerQueue is triggered, because the latter can occur more
or less anywhere in the middle of code that might not be happy with
random app-level code mutating data structures under its feet.
"""
def __init__(self, space):
AsyncAction.__init__(self, space)
- self.dying_objects = None
- self.dying_objects_last = None
- self.finalizers_lock_count = 0
- self.enabled_at_app_level = True
-
- def register_callback(self, w_obj, callback, descrname):
- cb = UserDelCallback(w_obj, callback, descrname)
- if self.dying_objects_last is None:
- self.dying_objects = cb
- else:
- self.dying_objects_last.next = cb
- self.dying_objects_last = cb
- self.fire()
+ self.finalizers_lock_count = 0 # see pypy/module/gc
+ self.enabled_at_app_level = True # see pypy/module/gc
+ self.pending_with_disabled_del = None
def perform(self, executioncontext, frame):
- if self.finalizers_lock_count > 0:
- return
self._run_finalizers()
+ @jit.dont_look_inside
def _run_finalizers(self):
- # Each call to perform() first grabs the self.dying_objects
- # and replaces it with an empty list. We do this to try to
- # avoid too deep recursions of the kind of __del__ being called
- # while in the middle of another __del__ call.
- pending = self.dying_objects
- self.dying_objects = None
- self.dying_objects_last = None
+ while True:
+ w_obj = self.space.finalizer_queue.next_dead()
+ if w_obj is None:
+ break
+ self._call_finalizer(w_obj)
+
+ def gc_disabled(self, w_obj):
+ # If we're running in 'gc.disable()' mode, record w_obj in the
+ # "call me later" list and return True. In normal mode, return
+ # False. Use this function from some _finalize_() methods:
+ # if a _finalize_() method would call some user-defined
+ # app-level function, like a weakref callback, then first do
+ # 'if gc.disabled(self): return'. Another attempt at
+ # calling _finalize_() will be made after 'gc.enable()'.
+ # (The exact rule for when to use gc_disabled() or not is a bit
+ # vague, but most importantly this includes all user-level
+ # __del__().)
+ pdd = self.pending_with_disabled_del
+ if pdd is None:
+ return False
+ else:
+ pdd.append(w_obj)
+ return True
+
+ def _call_finalizer(self, w_obj):
+ # Before calling the finalizers, clear the weakrefs, if any.
+ w_obj.clear_all_weakrefs()
+
+ # Look up and call the app-level __del__, if any.
space = self.space
- while pending is not None:
+ if w_obj.typedef is None:
+ w_del = None # obscure case: for WeakrefLifeline
+ else:
+ w_del = space.lookup(w_obj, '__del__')
+ if w_del is not None:
+ if self.gc_disabled(w_obj):
+ return
try:
- pending.callback(pending.w_obj)
- except OperationError as e:
- e.write_unraisable(space, pending.descrname, pending.w_obj)
- e.clear(space) # break up reference cycles
- pending = pending.next
- #
- # Note: 'dying_objects' used to be just a regular list instead
- # of a chained list. This was the cause of "leaks" if we have a
- # program that constantly creates new objects with finalizers.
- # Here is why: say 'dying_objects' is a long list, and there
- # are n instances in it. Then we spend some time in this
- # function, possibly triggering more GCs, but keeping the list
- # of length n alive. Then the list is suddenly freed at the
- # end, and we return to the user program. At this point the
- # GC limit is still very high, because just before, there was
- # a list of length n alive. Assume that the program continues
- # to allocate a lot of instances with finalizers. The high GC
- # limit means that it could allocate a lot of instances before
- # reaching it --- possibly more than n. So the whole procedure
- # repeats with higher and higher values of n.
- #
- # This does not occur in the current implementation because
- # there is no list of length n: if n is large, then the GC
- # will run several times while walking the list, but it will
- # see lower and lower memory usage, with no lower bound of n.
+ space.get_and_call_function(w_del, w_obj)
+ except Exception as e:
+ report_error(space, e, "method __del__ of ", w_obj)
+
+ # Call the RPython-level _finalize_() method.
+ try:
+ w_obj._finalize_()
+ except Exception as e:
+ report_error(space, e, "finalizer of ", w_obj)
+
+
+def report_error(space, e, where, w_obj):
+ if isinstance(e, OperationError):
+ e.write_unraisable(space, where, w_obj)
+ e.clear(space) # break up reference cycles
+ else:
+ addrstring = w_obj.getaddrstring(space)
+ msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % (
+ str(e), where, space.type(w_obj).name, addrstring))
+ space.call_method(space.sys.get('stderr'), 'write',
+ space.wrap(msg))
+
+
+def make_finalizer_queue(W_Root, space):
+ """Make a FinalizerQueue subclass which responds to GC finalizer
+ events by 'firing' the UserDelAction class above. It does not
+ directly fetch the objects to finalize at all; they stay in the
+ GC-managed queue, and will only be fetched by UserDelAction
+ (between bytecodes)."""
+
+ class WRootFinalizerQueue(rgc.FinalizerQueue):
+ Class = W_Root
+
+ def finalizer_trigger(self):
+ space.user_del_action.fire()
+
+ space.user_del_action = UserDelAction(space)
+ space.finalizer_queue = WRootFinalizerQueue()
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,6 +1,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.pyopcode import LoopBlock
+from pypy.interpreter.pycode import CO_YIELD_INSIDE_TRY
from rpython.rlib import jit
@@ -13,6 +14,8 @@
self.frame = frame # turned into None when frame_finished_execution
self.pycode = frame.pycode
self.running = False
+ if self.pycode.co_flags & CO_YIELD_INSIDE_TRY:
+ self.register_finalizer(self.space)
def descr__repr__(self, space):
if self.pycode is None:
@@ -139,7 +142,6 @@
def descr_close(self):
"""x.close(arg) -> raise GeneratorExit inside generator."""
- assert isinstance(self, GeneratorIterator)
space = self.space
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
@@ -212,25 +214,21 @@
unpack_into = _create_unpack_into()
unpack_into_w = _create_unpack_into()
-
-class GeneratorIteratorWithDel(GeneratorIterator):
-
- def __del__(self):
- # Only bother enqueuing self to raise an exception if the frame is
- # still not finished and finally or except blocks are present.
- self.clear_all_weakrefs()
+ def _finalize_(self):
+ # This is only called if the CO_YIELD_INSIDE_TRY flag is set
+ # on the code object. If the frame is still not finished and
+ # finally or except blocks are present at the current
+ # position, then raise a GeneratorExit. Otherwise, there is
+ # no point.
if self.frame is not None:
block = self.frame.lastblock
while block is not None:
if not isinstance(block, LoopBlock):
- self.enqueue_for_destruction(self.space,
- GeneratorIterator.descr_close,
- "interrupting generator of ")
+ self.descr_close()
break
block = block.previous
-
def get_printable_location_genentry(bytecode):
return '%s ' % (bytecode.get_repr(),)
generatorentry_driver = jit.JitDriver(greens=['pycode'],
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -241,12 +241,8 @@
def run(self):
"""Start this frame's execution."""
if self.getcode().co_flags & pycode.CO_GENERATOR:
- if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY:
- from pypy.interpreter.generator import GeneratorIteratorWithDel
- return self.space.wrap(GeneratorIteratorWithDel(self))
- else:
- from pypy.interpreter.generator import GeneratorIterator
- return self.space.wrap(GeneratorIterator(self))
+ from pypy.interpreter.generator import GeneratorIterator
+ return self.space.wrap(GeneratorIterator(self))
else:
return self.execute_frame()
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -127,10 +127,7 @@
""" % (slots, methodname, checks[0], checks[1],
checks[2], checks[3]))
subclasses = {}
- for key, subcls in typedef._subclass_cache.items():
- if key[0] is not space.config:
- continue
- cls = key[1]
+ for cls, subcls in typedef._unique_subclass_cache.items():
subclasses.setdefault(cls, {})
prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls)
assert subcls is prevsubcls
@@ -186,35 +183,20 @@
class W_Level1(W_Root):
def __init__(self, space1):
assert space1 is space
- def __del__(self):
+ self.register_finalizer(space)
+ def _finalize_(self):
space.call_method(w_seen, 'append', space.wrap(1))
- class W_Level2(W_Root):
- def __init__(self, space1):
- assert space1 is space
- def __del__(self):
- self.enqueue_for_destruction(space, W_Level2.destructormeth,
- 'FOO ')
- def destructormeth(self):
- space.call_method(w_seen, 'append', space.wrap(2))
W_Level1.typedef = typedef.TypeDef(
'level1',
__new__ = typedef.generic_new_descr(W_Level1))
- W_Level2.typedef = typedef.TypeDef(
- 'level2',
- __new__ = typedef.generic_new_descr(W_Level2))
#
w_seen = space.newlist([])
W_Level1(space)
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [1]
- #
- w_seen = space.newlist([])
- W_Level2(space)
- gc.collect(); gc.collect()
assert space.str_w(space.repr(w_seen)) == "[]" # not called yet
ec = space.getexecutioncontext()
self.space.user_del_action.perform(ec, None)
- assert space.unwrap(w_seen) == [2]
+ assert space.unwrap(w_seen) == [1] # called by user_del_action
#
w_seen = space.newlist([])
self.space.appexec([self.space.gettypeobject(W_Level1.typedef)],
@@ -236,29 +218,17 @@
A4()
""")
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [4, 1]
+ assert space.unwrap(w_seen) == [4, 1] # user __del__, and _finalize_
#
w_seen = space.newlist([])
- self.space.appexec([self.space.gettypeobject(W_Level2.typedef)],
+ self.space.appexec([self.space.gettypeobject(W_Level1.typedef)],
"""(level2):
class A5(level2):
pass
A5()
""")
gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [2]
- #
- w_seen = space.newlist([])
- self.space.appexec([self.space.gettypeobject(W_Level2.typedef),
- w_seen],
- """(level2, seen):
- class A6(level2):
- def __del__(self):
- seen.append(6)
- A6()
- """)
- gc.collect(); gc.collect()
- assert space.unwrap(w_seen) == [6, 2]
+ assert space.unwrap(w_seen) == [1] # _finalize_ only
def test_multiple_inheritance(self):
class W_A(W_Root):
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -24,6 +24,8 @@
self.bases = bases
self.heaptype = False
self.hasdict = '__dict__' in rawdict
+ # no __del__: use an RPython _finalize_() method and register_finalizer
+ assert '__del__' not in rawdict
self.weakrefable = '__weakref__' in rawdict
self.doc = rawdict.pop('__doc__', None)
for base in bases:
@@ -103,26 +105,20 @@
# we need two subclasses of the app-level type, one to add mapdict, and then one
# to add del to not slow down the GC.
-def get_unique_interplevel_subclass(space, cls, needsdel=False):
+def get_unique_interplevel_subclass(space, cls):
"NOT_RPYTHON: initialization-time only"
- if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False):
- needsdel = False
assert cls.typedef.acceptable_as_base_class
- key = space, cls, needsdel
try:
- return _subclass_cache[key]
+ return _unique_subclass_cache[cls]
except KeyError:
- # XXX can save a class if cls already has a __del__
- if needsdel:
- cls = get_unique_interplevel_subclass(space, cls, False)
- subcls = _getusercls(space, cls, needsdel)
- assert key not in _subclass_cache
- _subclass_cache[key] = subcls
+ subcls = _getusercls(cls)
+ assert cls not in _unique_subclass_cache
+ _unique_subclass_cache[cls] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
-_subclass_cache = {}
+_unique_subclass_cache = {}
-def _getusercls(space, cls, wants_del, reallywantdict=False):
+def _getusercls(cls, reallywantdict=False):
from rpython.rlib import objectmodel
from pypy.objspace.std.objectobject import W_ObjectObject
from pypy.module.__builtin__.interp_classobj import W_InstanceObject
@@ -132,11 +128,10 @@
typedef = cls.typedef
name = cls.__name__ + "User"
- mixins_needed = []
if cls is W_ObjectObject or cls is W_InstanceObject:
- mixins_needed.append(_make_storage_mixin_size_n())
+ base_mixin = _make_storage_mixin_size_n()
else:
- mixins_needed.append(MapdictStorageMixin)
+ base_mixin = MapdictStorageMixin
copy_methods = [BaseUserClassMapdict]
if reallywantdict or not typedef.hasdict:
# the type has no dict, mapdict to provide the dict
@@ -147,44 +142,12 @@
# support
copy_methods.append(MapdictWeakrefSupport)
name += "Weakrefable"
- if wants_del:
- # This subclass comes with an app-level __del__. To handle
- # it, we make an RPython-level __del__ method. This
- # RPython-level method is called directly by the GC and it
- # cannot do random things (calling the app-level __del__ would
- # be "random things"). So instead, we just call here
- # enqueue_for_destruction(), and the app-level __del__ will be
- # called later at a safe point (typically between bytecodes).
- # If there is also an inherited RPython-level __del__, it is
- # called afterwards---not immediately! This base
- # RPython-level __del__ is supposed to run only when the
- # object is not reachable any more. NOTE: it doesn't fully
- # work: see issue #2287.
- name += "Del"
- parent_destructor = getattr(cls, '__del__', None)
- def call_parent_del(self):
- assert isinstance(self, subcls)
- parent_destructor(self)
- def call_applevel_del(self):
- assert isinstance(self, subcls)
- space.userdel(self)
- class Proto(object):
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(space, call_applevel_del,
- 'method __del__ of ')
- if parent_destructor is not None:
- self.enqueue_for_destruction(space, call_parent_del,
- 'internal destructor of ')
- mixins_needed.append(Proto)
class subcls(cls):
user_overridden_class = True
- for base in mixins_needed:
- objectmodel.import_from_mixin(base)
+ objectmodel.import_from_mixin(base_mixin)
for copycls in copy_methods:
_copy_methods(copycls, subcls)
- del subcls.base
subcls.__name__ = name
return subcls
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -44,13 +44,12 @@
self.bases_w = bases
self.w_dict = w_dict
+ def has_user_del(self, space):
+ return self.lookup(space, '__del__') is not None
+
def instantiate(self, space):
cache = space.fromcache(Cache)
- if self.lookup(space, '__del__') is not None:
- w_inst = cache.cls_with_del(space, self)
- else:
- w_inst = cache.cls_without_del(space, self)
- return w_inst
+ return cache.InstanceObjectCls(space, self)
def getdict(self, space):
return self.w_dict
@@ -132,9 +131,9 @@
self.setbases(space, w_value)
return
elif name == "__del__":
- if self.lookup(space, name) is None:
+ if not self.has_user_del(space):
msg = ("a __del__ method added to an existing class will "
- "not be called")
+ "only be called on instances made from now on")
space.warn(space.wrap(msg), space.w_RuntimeWarning)
space.setitem(self.w_dict, w_attr, w_value)
@@ -184,14 +183,11 @@
if hasattr(space, 'is_fake_objspace'):
# hack: with the fake objspace, we don't want to see typedef's
# _getusercls() at all
- self.cls_without_del = W_InstanceObject
- self.cls_with_del = W_InstanceObject
+ self.InstanceObjectCls = W_InstanceObject
return
- self.cls_without_del = _getusercls(
- space, W_InstanceObject, False, reallywantdict=True)
- self.cls_with_del = _getusercls(
- space, W_InstanceObject, True, reallywantdict=True)
+ self.InstanceObjectCls = _getusercls(
+ W_InstanceObject, reallywantdict=True)
def class_descr_call(space, w_self, __args__):
@@ -297,12 +293,15 @@
class W_InstanceObject(W_Root):
def __init__(self, space, w_class):
# note that user_setup is overridden by the typedef.py machinery
+ self.space = space
self.user_setup(space, space.gettypeobject(self.typedef))
assert isinstance(w_class, W_ClassObject)
self.w_class = w_class
+ if w_class.has_user_del(space):
+ space.finalizer_queue.register_finalizer(self)
def user_setup(self, space, w_subtype):
- self.space = space
+ pass
def set_oldstyle_class(self, space, w_class):
if w_class is None or not isinstance(w_class, W_ClassObject):
@@ -368,8 +367,7 @@
self.set_oldstyle_class(space, w_value)
return
if name == '__del__' and w_meth is None:
- cache = space.fromcache(Cache)
- if (not isinstance(self, cache.cls_with_del)
+ if (not self.w_class.has_user_del(space)
and self.getdictvalue(space, '__del__') is None):
msg = ("a __del__ method added to an instance with no "
"__del__ in the class will not be called")
@@ -646,13 +644,14 @@
raise oefmt(space.w_TypeError, "instance has no next() method")
return space.call_function(w_func)
- def descr_del(self, space):
- # Note that this is called from executioncontext.UserDelAction
- # via the space.userdel() method.
+ def _finalize_(self):
+ space = self.space
w_func = self.getdictvalue(space, '__del__')
if w_func is None:
w_func = self.getattr_from_class(space, '__del__')
if w_func is not None:
+ if self.space.user_del_action.gc_disabled(self):
+ return
space.call_function(w_func)
def descr_exit(self, space, w_type, w_value, w_tb):
@@ -729,7 +728,6 @@
__pow__ = interp2app(W_InstanceObject.descr_pow),
__rpow__ = interp2app(W_InstanceObject.descr_rpow),
next = interp2app(W_InstanceObject.descr_next),
- __del__ = interp2app(W_InstanceObject.descr_del),
__exit__ = interp2app(W_InstanceObject.descr_exit),
__dict__ = dict_descr,
**rawdict
diff --git a/pypy/module/_cffi_backend/allocator.py b/pypy/module/_cffi_backend/allocator.py
--- a/pypy/module/_cffi_backend/allocator.py
+++ b/pypy/module/_cffi_backend/allocator.py
@@ -45,14 +45,11 @@
rffi.c_memset(rffi.cast(rffi.VOIDP, ptr), 0,
rffi.cast(rffi.SIZE_T, datasize))
#
- if self.w_free is None:
- # use this class which does not have a __del__, but still
- # keeps alive w_raw_cdata
- res = cdataobj.W_CDataNewNonStdNoFree(space, ptr, ctype, length)
- else:
- res = cdataobj.W_CDataNewNonStdFree(space, ptr, ctype, length)
+ res = cdataobj.W_CDataNewNonStd(space, ptr, ctype, length)
+ res.w_raw_cdata = w_raw_cdata
+ if self.w_free is not None:
res.w_free = self.w_free
- res.w_raw_cdata = w_raw_cdata
+ res.register_finalizer(space)
return res
@unwrap_spec(w_init=WrappedDefault(None))
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -449,22 +449,11 @@
lltype.free(self._ptr, flavor='raw')
-class W_CDataNewNonStdNoFree(W_CDataNewOwning):
- """Subclass using a non-standard allocator, no free()"""
- _attrs_ = ['w_raw_cdata']
+class W_CDataNewNonStd(W_CDataNewOwning):
+ """Subclass using a non-standard allocator"""
+ _attrs_ = ['w_raw_cdata', 'w_free']
-class W_CDataNewNonStdFree(W_CDataNewNonStdNoFree):
- """Subclass using a non-standard allocator, with a free()"""
- _attrs_ = ['w_free']
-
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space,
- W_CDataNewNonStdFree.call_destructor,
- 'destructor of ')
-
- def call_destructor(self):
- assert isinstance(self, W_CDataNewNonStdFree)
+ def _finalize_(self):
self.space.call_function(self.w_free, self.w_raw_cdata)
@@ -552,14 +541,9 @@
W_CData.__init__(self, space, cdata, ctype)
self.w_original_cdata = w_original_cdata
self.w_destructor = w_destructor
+ self.register_finalizer(space)
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space, W_CDataGCP.call_destructor,
- 'destructor of ')
-
- def call_destructor(self):
- assert isinstance(self, W_CDataGCP)
+ def _finalize_(self):
w_destructor = self.w_destructor
if w_destructor is not None:
self.w_destructor = None
diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py
--- a/pypy/module/_cffi_backend/cdlopen.py
+++ b/pypy/module/_cffi_backend/cdlopen.py
@@ -25,10 +25,13 @@
raise wrap_dlopenerror(ffi.space, e, filename)
W_LibObject.__init__(self, ffi, filename)
self.libhandle = handle
+ self.register_finalizer(ffi.space)
- def __del__(self):
- if self.libhandle:
- dlclose(self.libhandle)
+ def _finalize_(self):
+ h = self.libhandle
+ if h != rffi.cast(DLLHANDLE, 0):
+ self.libhandle = rffi.cast(DLLHANDLE, 0)
+ dlclose(h)
def cdlopen_fetch(self, name):
if not self.libhandle:
diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py
--- a/pypy/module/_cffi_backend/libraryobj.py
+++ b/pypy/module/_cffi_backend/libraryobj.py
@@ -15,7 +15,6 @@
class W_Library(W_Root):
_immutable_ = True
- handle = rffi.cast(DLLHANDLE, 0)
def __init__(self, space, filename, flags):
self.space = space
@@ -27,8 +26,9 @@
except DLOpenError as e:
raise wrap_dlopenerror(space, e, filename)
self.name = filename
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
h = self.handle
if h != rffi.cast(DLLHANDLE, 0):
self.handle = rffi.cast(DLLHANDLE, 0)
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -43,22 +43,18 @@
def __init__(self, space):
self.space = space
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
# assume that the file and stream objects are only visible in the
- # thread that runs __del__, so no race condition should be possible
- self.clear_all_weakrefs()
+ # thread that runs _finalize_, so no race condition should be
+ # possible and no locking is done here.
if self.stream is not None:
- self.enqueue_for_destruction(self.space, W_File.destructor,
- 'close() method of ')
-
- def destructor(self):
- assert isinstance(self, W_File)
- try:
- self.direct_close()
- except StreamErrors as e:
- operr = wrap_streamerror(self.space, e, self.w_name)
- raise operr
+ try:
+ self.direct_close()
+ except StreamErrors as e:
+ operr = wrap_streamerror(self.space, e, self.w_name)
+ raise operr
def fdopenstream(self, stream, fd, mode, w_name=None):
self.fd = fd
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -76,11 +76,14 @@
except:
lltype.free(ctx, flavor='raw')
raise
+ self.register_finalizer(space)
- def __del__(self):
- if self.ctx:
- ropenssl.EVP_MD_CTX_cleanup(self.ctx)
- lltype.free(self.ctx, flavor='raw')
+ def _finalize_(self):
+ ctx = self.ctx
+ if ctx:
+ self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO)
+ ropenssl.EVP_MD_CTX_cleanup(ctx)
+ lltype.free(ctx, flavor='raw')
def digest_type_by_name(self, space):
digest_type = ropenssl.EVP_get_digestbyname(self.name)
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -952,9 +952,15 @@
self.w_writer = None
raise
- def __del__(self):
- self.clear_all_weakrefs()
+ def _finalize_(self):
# Don't call the base __del__: do not close the files!
+ # Usually the _finalize_() method is not called at all because
+ # we set 'needs_to_finalize = False' in this class, so
+ # W_IOBase.__init__() won't call register_finalizer().
+ # However, this method might still be called: if the user
+ # makes an app-level subclass and adds a custom __del__.
+ pass
+ needs_to_finalize = False
# forward to reader
for method in ['read', 'peek', 'read1', 'readinto', 'readable']:
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -59,6 +59,8 @@
self.__IOBase_closed = False
if add_to_autoflusher:
get_autoflusher(space).add(self)
+ if self.needs_to_finalize:
+ self.register_finalizer(space)
def getdict(self, space):
return self.w_dict
@@ -71,13 +73,7 @@
return True
return False
- def __del__(self):
- self.clear_all_weakrefs()
- self.enqueue_for_destruction(self.space, W_IOBase.destructor,
- 'internal __del__ of ')
-
- def destructor(self):
- assert isinstance(self, W_IOBase)
+ def _finalize_(self):
space = self.space
w_closed = space.findattr(self, space.wrap('closed'))
try:
@@ -90,6 +86,7 @@
# equally as bad, and potentially more frequent (because of
# shutdown issues).
pass
+ needs_to_finalize = True
def _CLOSED(self):
# Use this macro whenever you want to check the internal `closed`
diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py
--- a/pypy/module/_multibytecodec/interp_incremental.py
+++ b/pypy/module/_multibytecodec/interp_incremental.py
@@ -20,8 +20,9 @@
self.codec = codec.codec
self.name = codec.name
self._initialize()
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
self._free()
def reset_w(self):
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -40,14 +40,17 @@
BUFFER_SIZE = 1024
buffer = lltype.nullptr(rffi.CCHARP.TO)
- def __init__(self, flags):
+ def __init__(self, space, flags):
self.flags = flags
self.buffer = lltype.malloc(rffi.CCHARP.TO, self.BUFFER_SIZE,
flavor='raw')
+ self.register_finalizer(space)
- def __del__(self):
- if self.buffer:
- lltype.free(self.buffer, flavor='raw')
+ def _finalize_(self):
+ buf = self.buffer
+ if buf:
+ self.buffer = lltype.nullptr(rffi.CCHARP.TO)
+ lltype.free(buf, flavor='raw')
try:
self.do_close()
except OSError:
@@ -242,7 +245,7 @@
def __init__(self, space, fd, flags):
if fd == self.INVALID_HANDLE_VALUE or fd < 0:
raise oefmt(space.w_IOError, "invalid handle %d", fd)
- W_BaseConnection.__init__(self, flags)
+ W_BaseConnection.__init__(self, space, flags)
self.fd = fd
@unwrap_spec(fd=int, readable=bool, writable=bool)
@@ -363,8 +366,8 @@
if sys.platform == 'win32':
from rpython.rlib.rwin32 import INVALID_HANDLE_VALUE
- def __init__(self, handle, flags):
- W_BaseConnection.__init__(self, flags)
+ def __init__(self, space, handle, flags):
+ W_BaseConnection.__init__(self, space, flags)
self.handle = handle
@unwrap_spec(readable=bool, writable=bool)
@@ -375,7 +378,7 @@
flags = (readable and READABLE) | (writable and WRITABLE)
self = space.allocate_instance(W_PipeConnection, w_subtype)
- W_PipeConnection.__init__(self, handle, flags)
+ W_PipeConnection.__init__(self, space, handle, flags)
return space.wrap(self)
def descr_repr(self, space):
diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py
--- a/pypy/module/_multiprocessing/interp_semaphore.py
+++ b/pypy/module/_multiprocessing/interp_semaphore.py
@@ -430,11 +430,12 @@
class W_SemLock(W_Root):
- def __init__(self, handle, kind, maxvalue):
+ def __init__(self, space, handle, kind, maxvalue):
self.handle = handle
self.kind = kind
self.count = 0
self.maxvalue = maxvalue
+ self.register_finalizer(space)
def kind_get(self, space):
return space.newint(self.kind)
@@ -508,7 +509,7 @@
@unwrap_spec(kind=int, maxvalue=int)
def rebuild(space, w_cls, w_handle, kind, maxvalue):
self = space.allocate_instance(W_SemLock, w_cls)
- self.__init__(handle_w(space, w_handle), kind, maxvalue)
+ self.__init__(space, handle_w(space, w_handle), kind, maxvalue)
return space.wrap(self)
def enter(self, space):
@@ -517,7 +518,7 @@
def exit(self, space, __args__):
self.release(space)
- def __del__(self):
+ def _finalize_(self):
delete_semaphore(self.handle)
@unwrap_spec(kind=int, value=int, maxvalue=int)
@@ -534,7 +535,7 @@
raise wrap_oserror(space, e)
self = space.allocate_instance(W_SemLock, w_subtype)
- self.__init__(handle, kind, maxvalue)
+ self.__init__(space, handle, kind, maxvalue)
return space.wrap(self)
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py
--- a/pypy/module/_pickle_support/maker.py
+++ b/pypy/module/_pickle_support/maker.py
@@ -4,7 +4,7 @@
from pypy.interpreter.function import Function, Method
from pypy.interpreter.module import Module
from pypy.interpreter.pytraceback import PyTraceback
-from pypy.interpreter.generator import GeneratorIteratorWithDel
+from pypy.interpreter.generator import GeneratorIterator
from rpython.rlib.objectmodel import instantiate
from pypy.interpreter.gateway import unwrap_spec
from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject
@@ -59,7 +59,7 @@
return space.wrap(tb)
def generator_new(space):
- new_generator = instantiate(GeneratorIteratorWithDel)
+ new_generator = instantiate(GeneratorIterator)
return space.wrap(new_generator)
@unwrap_spec(current=int, remaining=int, step=int)
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -278,6 +278,8 @@
sock_fd = space.int_w(space.call_method(w_sock, "fileno"))
self.ssl = libssl_SSL_new(w_ctx.ctx) # new ssl struct
+ self.register_finalizer(space)
+
index = compute_unique_id(self)
libssl_SSL_set_app_data(self.ssl, rffi.cast(rffi.VOIDP, index))
SOCKET_STORAGE.set(index, self)
@@ -317,16 +319,15 @@
self.ssl_sock_weakref_w = None
return self
- def __del__(self):
- self.enqueue_for_destruction(self.space, _SSLSocket.destructor,
- '__del__() method of ')
-
- def destructor(self):
- assert isinstance(self, _SSLSocket)
- if self.peer_cert:
- libssl_X509_free(self.peer_cert)
- if self.ssl:
- libssl_SSL_free(self.ssl)
+ def _finalize_(self):
+ peer_cert = self.peer_cert
+ if peer_cert:
+ self.peer_cert = lltype.nullptr(X509.TO)
+ libssl_X509_free(peer_cert)
+ ssl = self.ssl
+ if ssl:
+ self.ssl = lltype.nullptr(SSL.TO)
+ libssl_SSL_free(ssl)
@unwrap_spec(data='bufferstr')
def write(self, space, data):
@@ -1285,6 +1286,7 @@
self = space.allocate_instance(_SSLContext, w_subtype)
self.ctx = ctx
self.check_hostname = False
+ self.register_finalizer(space)
options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
if protocol != PY_SSL_VERSION_SSL2:
options |= SSL_OP_NO_SSLv2
@@ -1308,8 +1310,11 @@
return self
- def __del__(self):
- libssl_SSL_CTX_free(self.ctx)
+ def _finalize_(self):
+ ctx = self.ctx
+ if ctx:
+ self.ctx = lltype.nullptr(SSL_CTX.TO)
+ libssl_SSL_CTX_free(ctx)
@unwrap_spec(server_side=int)
def descr_wrap_socket(self, space, w_sock, server_side, w_server_hostname=None, w_ssl_sock=None):
diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py
--- a/pypy/module/_weakref/interp__weakref.py
+++ b/pypy/module/_weakref/interp__weakref.py
@@ -3,7 +3,8 @@
from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import interp2app, ObjSpace
from pypy.interpreter.typedef import TypeDef
-from rpython.rlib import jit
+from pypy.interpreter.executioncontext import AsyncAction, report_error
+from rpython.rlib import jit, rgc
from rpython.rlib.rshrinklist import AbstractShrinkList
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rweakref import dead_ref
@@ -16,9 +17,12 @@
class WeakrefLifeline(W_Root):
+ typedef = None
+
cached_weakref = None
cached_proxy = None
other_refs_weak = None
+ has_callbacks = False
def __init__(self, space):
self.space = space
@@ -99,31 +103,10 @@
return w_ref
return space.w_None
-
-class WeakrefLifelineWithCallbacks(WeakrefLifeline):
-
- def __init__(self, space, oldlifeline=None):
- self.space = space
- if oldlifeline is not None:
- self.cached_weakref = oldlifeline.cached_weakref
- self.cached_proxy = oldlifeline.cached_proxy
- self.other_refs_weak = oldlifeline.other_refs_weak
-
- def __del__(self):
- """This runs when the interp-level object goes away, and allows
- its lifeline to go away. The purpose of this is to activate the
- callbacks even if there is no __del__ method on the interp-level
- W_Root subclass implementing the object.
- """
- if self.other_refs_weak is None:
- return
- items = self.other_refs_weak.items()
- for i in range(len(items)-1, -1, -1):
- w_ref = items[i]()
- if w_ref is not None and w_ref.w_callable is not None:
- w_ref.enqueue_for_destruction(self.space,
- W_WeakrefBase.activate_callback,
- 'weakref callback of ')
+ def enable_callbacks(self):
+ if not self.has_callbacks:
+ self.space.finalizer_queue.register_finalizer(self)
+ self.has_callbacks = True
@jit.dont_look_inside
def make_weakref_with_callback(self, w_subtype, w_obj, w_callable):
@@ -131,6 +114,7 @@
w_ref = space.allocate_instance(W_Weakref, w_subtype)
W_Weakref.__init__(w_ref, space, w_obj, w_callable)
self.append_wref_to(w_ref)
+ self.enable_callbacks()
return w_ref
@jit.dont_look_inside
@@ -141,8 +125,33 @@
else:
w_proxy = W_Proxy(space, w_obj, w_callable)
self.append_wref_to(w_proxy)
+ self.enable_callbacks()
return w_proxy
+ def _finalize_(self):
+ """This is called at the end, if enable_callbacks() was invoked.
+ It activates the callbacks.
+ """
+ if self.other_refs_weak is None:
+ return
+ #
+ # If this is set, then we're in the 'gc.disable()' mode. In that
+ # case, don't invoke the callbacks now.
+ if self.space.user_del_action.gc_disabled(self):
+ return
+ #
+ items = self.other_refs_weak.items()
+ self.other_refs_weak = None
+ for i in range(len(items)-1, -1, -1):
+ w_ref = items[i]()
+ if w_ref is not None and w_ref.w_callable is not None:
+ try:
+ w_ref.activate_callback()
+ except Exception as e:
+ report_error(self.space, e,
+ "weakref callback ", w_ref.w_callable)
+
+
# ____________________________________________________________
@@ -163,7 +172,6 @@
self.w_obj_weak = dead_ref
def activate_callback(w_self):
- assert isinstance(w_self, W_WeakrefBase)
w_self.space.call_function(w_self.w_callable, w_self)
def descr__repr__(self, space):
@@ -227,32 +235,16 @@
w_obj.setweakref(space, lifeline)
return lifeline
-def getlifelinewithcallbacks(space, w_obj):
- lifeline = w_obj.getweakref()
- if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None
- oldlifeline = lifeline
- lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline)
- w_obj.setweakref(space, lifeline)
- return lifeline
-
-
-def get_or_make_weakref(space, w_subtype, w_obj):
- return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj)
-
-
-def make_weakref_with_callback(space, w_subtype, w_obj, w_callable):
- lifeline = getlifelinewithcallbacks(space, w_obj)
- return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable)
-
def descr__new__weakref(space, w_subtype, w_obj, w_callable=None,
__args__=None):
if __args__.arguments_w:
raise oefmt(space.w_TypeError, "__new__ expected at most 2 arguments")
+ lifeline = getlifeline(space, w_obj)
if space.is_none(w_callable):
- return get_or_make_weakref(space, w_subtype, w_obj)
+ return lifeline.get_or_make_weakref(w_subtype, w_obj)
else:
- return make_weakref_with_callback(space, w_subtype, w_obj, w_callable)
+ return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable)
W_Weakref.typedef = TypeDef("weakref",
__doc__ = """A weak reference to an object 'obj'. A 'callback' can be given,
@@ -308,23 +300,15 @@
return space.call_args(w_obj, __args__)
-def get_or_make_proxy(space, w_obj):
- return getlifeline(space, w_obj).get_or_make_proxy(w_obj)
-
-
-def make_proxy_with_callback(space, w_obj, w_callable):
- lifeline = getlifelinewithcallbacks(space, w_obj)
- return lifeline.make_proxy_with_callback(w_obj, w_callable)
-
-
def proxy(space, w_obj, w_callable=None):
"""Create a proxy object that weakly references 'obj'.
'callback', if given, is called with the proxy as an argument when 'obj'
is about to be finalized."""
+ lifeline = getlifeline(space, w_obj)
if space.is_none(w_callable):
- return get_or_make_proxy(space, w_obj)
+ return lifeline.get_or_make_proxy(w_obj)
else:
- return make_proxy_with_callback(space, w_obj, w_callable)
+ return lifeline.make_proxy_with_callback(w_obj, w_callable)
def descr__new__proxy(space, w_subtype, w_obj, w_callable=None):
raise oefmt(space.w_TypeError, "cannot create 'weakproxy' instances")
@@ -345,7 +329,7 @@
proxy_typedef_dict = {}
callable_proxy_typedef_dict = {}
-special_ops = {'repr': True, 'userdel': True, 'hash': True}
+special_ops = {'repr': True, 'hash': True}
for opname, _, arity, special_methods in ObjSpace.MethodTable:
if opname in special_ops or not special_methods:
diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py
--- a/pypy/module/_weakref/test/test_weakref.py
+++ b/pypy/module/_weakref/test/test_weakref.py
@@ -1,6 +1,9 @@
class AppTestWeakref(object):
spaceconfig = dict(usemodules=('_weakref',))
-
+
+ def setup_class(cls):
+ cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
+
def test_simple(self):
import _weakref, gc
class A(object):
@@ -287,6 +290,9 @@
assert a1 is None
def test_del_and_callback_and_id(self):
+ if not self.runappdirect:
+ skip("the id() doesn't work correctly in __del__ and "
+ "callbacks before translation")
import gc, weakref
seen_del = []
class A(object):
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -518,8 +518,14 @@
def __init__(self, space, compresslevel):
self.space = space
self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True)
- self.running = False
- self._init_bz2comp(compresslevel)
+ try:
+ self.running = False
+ self._init_bz2comp(compresslevel)
+ except:
+ lltype.free(self.bzs, flavor='raw')
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ raise
+ self.register_finalizer(space)
def _init_bz2comp(self, compresslevel):
if compresslevel < 1 or compresslevel > 9:
@@ -532,9 +538,12 @@
self.running = True
- def __del__(self):
- BZ2_bzCompressEnd(self.bzs)
- lltype.free(self.bzs, flavor='raw')
+ def _finalize_(self):
+ bzs = self.bzs
+ if bzs:
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ BZ2_bzCompressEnd(bzs)
+ lltype.free(bzs, flavor='raw')
@unwrap_spec(data='bufferstr')
def compress(self, data):
@@ -621,10 +630,16 @@
self.space = space
self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True)
- self.running = False
- self.unused_data = ""
+ try:
+ self.running = False
+ self.unused_data = ""
- self._init_bz2decomp()
+ self._init_bz2decomp()
+ except:
+ lltype.free(self.bzs, flavor='raw')
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ raise
+ self.register_finalizer(space)
def _init_bz2decomp(self):
bzerror = BZ2_bzDecompressInit(self.bzs, 0, 0)
@@ -633,9 +648,12 @@
self.running = True
- def __del__(self):
- BZ2_bzDecompressEnd(self.bzs)
- lltype.free(self.bzs, flavor='raw')
+ def _finalize_(self):
+ bzs = self.bzs
+ if bzs:
+ self.bzs = lltype.nullptr(bz_stream.TO)
+ BZ2_bzDecompressEnd(bzs)
+ lltype.free(bzs, flavor='raw')
@unwrap_spec(data='bufferstr')
def decompress(self, data):
diff --git a/pypy/module/bz2/test/support.py b/pypy/module/bz2/test/support.py
--- a/pypy/module/bz2/test/support.py
+++ b/pypy/module/bz2/test/support.py
@@ -10,5 +10,6 @@
#
while tries and ll2ctypes.ALLOCATED:
gc.collect() # to make sure we disallocate buffers
+ self.space.getexecutioncontext()._run_finalizers_now()
tries -= 1
assert not ll2ctypes.ALLOCATED
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -1020,9 +1020,12 @@
class W_CPPInstance(W_Root):
- _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns']
+ _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns',
+ 'finalizer_registered']
_immutable_fields_ = ["cppclass", "isref"]
+ finalizer_registered = False
+
def __init__(self, space, cppclass, rawobject, isref, python_owns):
self.space = space
self.cppclass = cppclass
@@ -1032,6 +1035,12 @@
assert not isref or not python_owns
self.isref = isref
self.python_owns = python_owns
+ self._opt_register_finalizer()
+
+ def _opt_register_finalizer(self):
+ if self.python_owns and not self.finalizer_registered:
+ self.register_finalizer(self.space)
+ self.finalizer_registered = True
def _nullcheck(self):
if not self._rawobject or (self.isref and not self.get_rawobject()):
@@ -1045,6 +1054,7 @@
@unwrap_spec(value=bool)
def fset_python_owns(self, space, value):
self.python_owns = space.is_true(value)
+ self._opt_register_finalizer()
def get_cppthis(self, calling_scope):
return self.cppclass.get_cppthis(self, calling_scope)
@@ -1143,16 +1153,14 @@
(self.cppclass.name, rffi.cast(rffi.ULONG, self.get_rawobject())))
def destruct(self):
- assert isinstance(self, W_CPPInstance)
if self._rawobject and not self.isref:
memory_regulator.unregister(self)
capi.c_destruct(self.space, self.cppclass, self._rawobject)
self._rawobject = capi.C_NULL_OBJECT
- def __del__(self):
+ def _finalize_(self):
if self.python_owns:
- self.enqueue_for_destruction(self.space, W_CPPInstance.destruct,
- '__del__() method of ')
+ self.destruct()
W_CPPInstance.typedef = TypeDef(
'CPPInstance',
diff --git a/pypy/module/cpyext/src/abstract.c b/pypy/module/cpyext/src/abstract.c
--- a/pypy/module/cpyext/src/abstract.c
+++ b/pypy/module/cpyext/src/abstract.c
@@ -326,3 +326,9 @@
return tmp;
}
+/* for binary compatibility with 5.1 */
+PyAPI_FUNC(void) PyPyObject_Del(PyObject *);
+void PyPyObject_Del(PyObject *op)
+{
+ PyObject_FREE(op);
+}
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -927,31 +927,62 @@
("fetchFooType", "METH_VARARGS",
"""
PyObject *o;
+ Foo_Type.tp_basicsize = sizeof(FooObject);
Foo_Type.tp_dealloc = &dealloc_foo;
- Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
+ Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES
+ | Py_TPFLAGS_BASETYPE;
Foo_Type.tp_new = &new_foo;
Foo_Type.tp_free = &PyObject_Del;
if (PyType_Ready(&Foo_Type) < 0) return NULL;
o = PyObject_New(PyObject, &Foo_Type);
+ init_foo(o);
Py_DECREF(o); /* calls dealloc_foo immediately */
Py_INCREF(&Foo_Type);
return (PyObject *)&Foo_Type;
"""),
+ ("newInstance", "METH_O",
+ """
+ PyTypeObject *tp = (PyTypeObject *)args;
+ PyObject *e = PyTuple_New(0);
+ PyObject *o = tp->tp_new(tp, e, NULL);
+ Py_DECREF(e);
+ return o;
+ """),
("getCounter", "METH_VARARGS",
"""
return PyInt_FromLong(foo_counter);
""")], prologue=
"""
+ typedef struct {
+ PyObject_HEAD
+ int someval[99];
+ } FooObject;
static int foo_counter = 1000;
static void dealloc_foo(PyObject *foo) {
+ int i;
foo_counter += 10;
+ for (i = 0; i < 99; i++)
+ if (((FooObject *)foo)->someval[i] != 1000 + i)
+ foo_counter += 100000; /* error! */
+ Py_TYPE(foo)->tp_free(foo);
+ }
+ static void init_foo(PyObject *o)
+ {
+ int i;
+ if (o->ob_type->tp_basicsize < sizeof(FooObject))
+ abort();
+ for (i = 0; i < 99; i++)
+ ((FooObject *)o)->someval[i] = 1000 + i;
}
static PyObject *new_foo(PyTypeObject *t, PyObject *a, PyObject *k)
{
+ PyObject *o;
foo_counter += 1000;
- return t->tp_alloc(t, 0);
+ o = t->tp_alloc(t, 0);
+ init_foo(o);
+ return o;
}
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
@@ -971,9 +1002,24 @@
#
class Bar(Foo):
pass
+ assert Foo.__new__ is Bar.__new__
Bar(); Bar()
for i in range(10):
if module.getCounter() >= 5050:
break
self.debug_collect()
assert module.getCounter() == 5050
+ #
+ module.newInstance(Foo)
+ for i in range(10):
+ if module.getCounter() >= 6060:
+ break
+ self.debug_collect()
+ assert module.getCounter() == 6060
+ #
+ module.newInstance(Bar)
+ for i in range(10):
+ if module.getCounter() >= 7070:
+ break
+ self.debug_collect()
+ assert module.getCounter() == 7070
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -196,6 +196,10 @@
def update_all_slots(space, w_type, pto):
# XXX fill slots in pto
+ # Not very sure about it, but according to
+ # test_call_tp_dealloc_when_created_from_python, we should not
+ # overwrite slots that are already set: these ones are probably
+ # coming from a parent C type.
typedef = w_type.layout.typedef
for method_name, slot_name, slot_names, slot_func in slotdefs_for_tp_slots:
@@ -223,7 +227,8 @@
# XXX special case wrapper-functions and use a "specific" slot func
if len(slot_names) == 1:
- setattr(pto, slot_names[0], slot_func_helper)
+ if not getattr(pto, slot_names[0]):
+ setattr(pto, slot_names[0], slot_func_helper)
else:
assert len(slot_names) == 2
struct = getattr(pto, slot_names[0])
@@ -240,7 +245,8 @@
struct = lltype.malloc(STRUCT_TYPE, flavor='raw', zero=True)
setattr(pto, slot_names[0], struct)
- setattr(struct, slot_names[1], slot_func_helper)
+ if not getattr(struct, slot_names[1]):
+ setattr(struct, slot_names[1], slot_func_helper)
def add_operators(space, dict_w, pto):
# XXX support PyObject_HashNotImplemented
diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py
--- a/pypy/module/gc/interp_gc.py
+++ b/pypy/module/gc/interp_gc.py
@@ -38,13 +38,23 @@
return space.newbool(space.user_del_action.enabled_at_app_level)
def enable_finalizers(space):
- if space.user_del_action.finalizers_lock_count == 0:
+ uda = space.user_del_action
+ if uda.finalizers_lock_count == 0:
raise oefmt(space.w_ValueError, "finalizers are already enabled")
- space.user_del_action.finalizers_lock_count -= 1
- space.user_del_action.fire()
+ uda.finalizers_lock_count -= 1
+ if uda.finalizers_lock_count == 0:
+ pending = uda.pending_with_disabled_del
+ uda.pending_with_disabled_del = None
+ if pending is not None:
+ for i in range(len(pending)):
+ uda._call_finalizer(pending[i])
+ pending[i] = None # clear the list as we progress
def disable_finalizers(space):
- space.user_del_action.finalizers_lock_count += 1
+ uda = space.user_del_action
+ uda.finalizers_lock_count += 1
+ if uda.pending_with_disabled_del is None:
+ uda.pending_with_disabled_del = []
# ____________________________________________________________
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -3,7 +3,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from pypy.interpreter.argument import Arguments
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
from rpython.rlib.rarithmetic import LONG_BIT, maxint, _get_bitsize
from rpython.tool.sourcetools import func_with_new_name
from rpython.rlib.rawstorage import (
@@ -1534,6 +1534,7 @@
self.steps = alloc_raw_storage(0, track_allocation=False)
self.dims_steps_set = False
+ @rgc.must_be_light_finalizer
def __del__(self):
free_raw_storage(self.dims, track_allocation=False)
free_raw_storage(self.steps, track_allocation=False)
diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py
--- a/pypy/module/pyexpat/interp_pyexpat.py
+++ b/pypy/module/pyexpat/interp_pyexpat.py
@@ -421,8 +421,11 @@
class W_XMLParserType(W_Root):
+ id = -1
+
def __init__(self, space, parser, w_intern):
self.itself = parser
+ self.register_finalizer(space)
self.w_intern = w_intern
@@ -444,14 +447,17 @@
CallbackData(space, self))
XML_SetUserData(self.itself, rffi.cast(rffi.VOIDP, self.id))
- def __del__(self):
+ def _finalize_(self):
if XML_ParserFree: # careful with CPython interpreter shutdown
- XML_ParserFree(self.itself)
- if global_storage:
+ if self.itself:
+ XML_ParserFree(self.itself)
+ self.itself = lltype.nullptr(XML_Parser.TO)
+ if global_storage and self.id >= 0:
try:
global_storage.free_nonmoving_id(self.id)
except KeyError:
pass # maybe global_storage.clear() was already called
+ self.id = -1
@unwrap_spec(flag=int)
def SetParamEntityParsing(self, space, flag):
diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
--- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
@@ -28,10 +28,10 @@
p65 = getfield_gc_r(p14, descr=)
guard_value(p65, ConstPtr(ptr45), descr=...)
p66 = getfield_gc_r(p14, descr=)
- guard_nonnull_class(p66, ..., descr=...)
+ guard_nonnull(p66, descr=...)
p67 = force_token()
setfield_gc(p0, p67, descr=)
- p68 = call_may_force_r(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=)
+ p68 = call_may_force_r(ConstClass(WeakrefLifeline.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
guard_nonnull_class(p68, ..., descr=...)
diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py
--- a/pypy/module/select/interp_epoll.py
+++ b/pypy/module/select/interp_epoll.py
@@ -80,6 +80,7 @@
class W_Epoll(W_Root):
def __init__(self, space, epfd):
self.epfd = epfd
+ self.register_finalizer(space)
@unwrap_spec(sizehint=int)
def descr__new__(space, w_subtype, sizehint=-1):
@@ -98,7 +99,7 @@
def descr_fromfd(space, w_cls, fd):
return space.wrap(W_Epoll(space, fd))
- def __del__(self):
+ def _finalize_(self):
self.close()
def check_closed(self, space):
diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py
--- a/pypy/module/select/interp_kqueue.py
+++ b/pypy/module/select/interp_kqueue.py
@@ -109,6 +109,7 @@
class W_Kqueue(W_Root):
def __init__(self, space, kqfd):
self.kqfd = kqfd
+ self.register_finalizer(space)
def descr__new__(space, w_subtype):
kqfd = syscall_kqueue()
@@ -120,7 +121,7 @@
def descr_fromfd(space, w_cls, fd):
return space.wrap(W_Kqueue(space, fd))
- def __del__(self):
+ def _finalize_(self):
self.close()
def get_closed(self):
diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py
--- a/pypy/module/zlib/interp_zlib.py
+++ b/pypy/module/zlib/interp_zlib.py
@@ -148,8 +148,9 @@
raise zlib_error(space, e.msg)
except ValueError:
raise oefmt(space.w_ValueError, "Invalid initialization option")
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
"""Automatically free the resources used by the stream."""
if self.stream:
rzlib.deflateEnd(self.stream)
@@ -258,8 +259,9 @@
raise zlib_error(space, e.msg)
except ValueError:
raise oefmt(space.w_ValueError, "Invalid initialization option")
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
"""Automatically free the resources used by the stream."""
if self.stream:
rzlib.inflateEnd(self.stream)
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -440,11 +440,6 @@
raise oefmt(space.w_TypeError,
"__hash__() should return an int or long")
- def userdel(space, w_obj):
- w_del = space.lookup(w_obj, '__del__')
- if w_del is not None:
- space.get_and_call_function(w_del, w_obj)
-
def cmp(space, w_v, w_w):
if space.is_w(w_v, w_w):
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -357,11 +357,12 @@
if cls.typedef.applevel_subclasses_base is not None:
cls = cls.typedef.applevel_subclasses_base
#
- subcls = get_unique_interplevel_subclass(
- self, cls, w_subtype.needsdel)
+ subcls = get_unique_interplevel_subclass(self, cls)
instance = instantiate(subcls)
assert isinstance(instance, cls)
instance.user_setup(self, w_subtype)
+ if w_subtype.hasuserdel:
+ self.finalizer_queue.register_finalizer(instance)
else:
raise oefmt(self.w_TypeError,
"%N.__new__(%N): only for the type %N",
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -132,7 +132,7 @@
"flag_sequence_bug_compat",
"flag_map_or_seq", # '?' or 'M' or 'S'
"compares_by_identity_status?",
- 'needsdel',
+ 'hasuserdel',
'weakrefable',
'hasdict',
'layout',
@@ -160,7 +160,7 @@
w_self.bases_w = bases_w
w_self.dict_w = dict_w
w_self.hasdict = False
- w_self.needsdel = False
+ w_self.hasuserdel = False
w_self.weakrefable = False
w_self.w_doc = space.w_None
w_self.weak_subclasses = []
@@ -289,7 +289,7 @@
# compute a tuple that fully describes the instance layout
def get_full_instance_layout(w_self):
layout = w_self.layout
- return (layout, w_self.hasdict, w_self.needsdel, w_self.weakrefable)
+ return (layout, w_self.hasdict, w_self.weakrefable)
def compute_default_mro(w_self):
return compute_C3_mro(w_self.space, w_self)
@@ -986,7 +986,7 @@
hasoldstylebase = True
continue
w_self.hasdict = w_self.hasdict or w_base.hasdict
- w_self.needsdel = w_self.needsdel or w_base.needsdel
+ w_self.hasuserdel = w_self.hasuserdel or w_base.hasuserdel
w_self.weakrefable = w_self.weakrefable or w_base.weakrefable
return hasoldstylebase
@@ -1028,7 +1028,7 @@
if wantweakref:
create_weakref_slot(w_self)
if '__del__' in dict_w:
- w_self.needsdel = True
+ w_self.hasuserdel = True
#
if index_next_extra_slot == base_layout.nslots and not force_new_layout:
return base_layout
diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py
--- a/pypy/tool/pytest/apptest.py
+++ b/pypy/tool/pytest/apptest.py
@@ -7,7 +7,7 @@
# ...unless the -A option ('runappdirect') is passed.
import py
-import sys, textwrap, types
+import sys, textwrap, types, gc
from pypy.interpreter.gateway import app2interp_temp
from pypy.interpreter.error import OperationError
from pypy.interpreter.function import Method
@@ -32,6 +32,7 @@
return traceback
def execute_appex(self, space, target, *args):
+ self.space = space
try:
target(*args)
except OperationError as e:
@@ -64,6 +65,13 @@
code = getattr(func, 'im_func', func).func_code
return "[%s:%s]" % (code.co_filename, code.co_firstlineno)
+ def track_allocations_collect(self):
+ gc.collect()
+ # must also invoke finalizers now; UserDelAction
+ # would not run at all unless invoked explicitly
+ if hasattr(self, 'space'):
+ self.space.getexecutioncontext()._run_finalizers_now()
+
class AppTestMethod(AppTestFunction):
def setup(self):
diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py
--- a/rpython/annotator/classdesc.py
+++ b/rpython/annotator/classdesc.py
@@ -579,6 +579,14 @@
if cls not in FORCE_ATTRIBUTES_INTO_CLASSES:
self.all_enforced_attrs = [] # no attribute allowed
+ if (getattr(cls, '_must_be_light_finalizer_', False) and
+ hasattr(cls, '__del__') and
+ not getattr(cls.__del__, '_must_be_light_finalizer_', False)):
+ raise AnnotatorError(
+ "Class %r is in a class hierarchy with "
+ "_must_be_light_finalizer_ = True: it cannot have a "
+ "finalizer without @rgc.must_be_light_finalizer" % (cls,))
+
def add_source_attribute(self, name, value, mixin=False):
if isinstance(value, property):
# special case for property object
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4584,6 +4584,32 @@
e = py.test.raises(Exception, a.build_types, f, [])
assert str(e.value) == "Don't know how to represent Ellipsis"
+ def test_must_be_light_finalizer(self):
+ from rpython.rlib import rgc
+ @rgc.must_be_light_finalizer
+ class A(object):
+ pass
+ class B(A):
+ def __del__(self):
+ pass
+ class C(A):
+ @rgc.must_be_light_finalizer
+ def __del__(self):
+ pass
+ class D(object):
+ def __del__(self):
+ pass
+ def fb():
+ B()
+ def fc():
+ C()
+ def fd():
+ D()
+ a = self.RPythonAnnotator()
+ a.build_types(fc, [])
+ a.build_types(fd, [])
+ py.test.raises(AnnotatorError, a.build_types, fb, [])
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/conftest.py b/rpython/conftest.py
--- a/rpython/conftest.py
+++ b/rpython/conftest.py
@@ -82,7 +82,13 @@
return
if (not getattr(item.obj, 'dont_track_allocations', False)
and leakfinder.TRACK_ALLOCATIONS):
- item._pypytest_leaks = leakfinder.stop_tracking_allocations(False)
+ kwds = {}
+ try:
+ kwds['do_collection'] = item.track_allocations_collect
+ except AttributeError:
+ pass
+ item._pypytest_leaks = leakfinder.stop_tracking_allocations(False,
+ **kwds)
else: # stop_tracking_allocations() already called
item._pypytest_leaks = None
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -362,6 +362,16 @@
return func
def must_be_light_finalizer(func):
+ """Mark a __del__ method as being a destructor, calling only a limited
+ set of operations. See pypy/doc/discussion/finalizer-order.rst.
+
+ If you use the same decorator on a class, this class and all its
From pypy.commits at gmail.com Tue May 10 05:53:24 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 10 May 2016 02:53:24 -0700 (PDT)
Subject: [pypy-commit] pypy default: Fixed issue #2172. The test specified
	an invalid parameter for memory protection of the mmap call; PowerPC
	rejects that parameter
Message-ID: <5731af94.8455c20a.4f164.ffff8131@mx.google.com>
Author: Richard Plangger
Branch:
Changeset: r84349:aa75f1381bfa
Date: 2016-05-10 11:51 +0200
http://bitbucket.org/pypy/pypy/changeset/aa75f1381bfa/
Log:	Fixed issue #2172. The test specified an invalid parameter for
	memory protection of the mmap call; PowerPC rejects that parameter
diff --git a/rpython/rlib/test/test_rmmap.py b/rpython/rlib/test/test_rmmap.py
--- a/rpython/rlib/test/test_rmmap.py
+++ b/rpython/rlib/test/test_rmmap.py
@@ -296,7 +296,7 @@
f = open(self.tmpname + "l2", "w+")
f.write("foobar")
f.flush()
- m = mmap.mmap(f.fileno(), 6, prot=~mmap.PROT_WRITE)
+ m = mmap.mmap(f.fileno(), 6, prot=mmap.PROT_READ|mmap.PROT_EXEC)
py.test.raises(RTypeError, m.check_writeable)
py.test.raises(RTypeError, m.check_writeable)
m.close()
From pypy.commits at gmail.com Tue May 10 07:33:49 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 10 May 2016 04:33:49 -0700 (PDT)
Subject: [pypy-commit] pypy new-jit-log: added new tag to store source code
line in binary (done by vmprof/client)
Message-ID: <5731c71d.8a9d1c0a.e4993.ffffcfa3@mx.google.com>
Author: Richard Plangger
Branch: new-jit-log
Changeset: r84350:f8bf0d7c3949
Date: 2016-05-10 13:33 +0200
http://bitbucket.org/pypy/pypy/changeset/f8bf0d7c3949/
Log: added new tag to store source code line in binary (done by
vmprof/client)
diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py
--- a/rpython/rlib/jitlog.py
+++ b/rpython/rlib/jitlog.py
@@ -175,6 +175,7 @@
('MERGE_POINT',),
('COMMON_PREFIX',),
('ABORT_TRACE',),
+ ('SOURCE_CODE',),
]
start = 0x11
From pypy.commits at gmail.com Tue May 10 12:31:37 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 09:31:37 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: Remove divisions and modulos
 from regular JIT operations, using oopspec
Message-ID: <57320ce9.2472c20a.7ac37.2af3@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84351:1ad01ba1173b
Date: 2016-05-10 18:31 +0200
http://bitbucket.org/pypy/pypy/changeset/1ad01ba1173b/
Log:	Remove divisions and modulos from regular JIT operations, using
	oopspec calls
diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py
--- a/rpython/jit/codewriter/effectinfo.py
+++ b/rpython/jit/codewriter/effectinfo.py
@@ -26,6 +26,11 @@
OS_THREADLOCALREF_GET = 5 # llop.threadlocalref_get
OS_NOT_IN_TRACE = 8 # for calls not recorded in the jit trace
#
+ OS_INT_PY_DIV = 12 # python signed division (neg. corrected)
+ OS_INT_UDIV = 13 # regular unsigned division
+ OS_INT_PY_MOD = 14 # python signed modulo (neg. corrected)
+ OS_INT_UMOD = 15 # regular unsigned modulo
+ #
OS_STR_CONCAT = 22 # "stroruni.concat"
OS_STR_SLICE = 23 # "stroruni.slice"
OS_STR_EQUAL = 24 # "stroruni.equal"
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -1903,15 +1903,19 @@
self.callcontrol.callinfocollection.add(oopspecindex, calldescr, func)
def _handle_int_ovf(self, op, oopspec_name, args):
- assert oopspec_name in ('int.add_ovf', 'int.sub_ovf', 'int.mul_ovf',
- 'int.py_div', 'int.py_mod')
- op0 = SpaceOperation(oopspec_name.replace('.', '_'), args, op.result)
- if oopspec_name in ('int.add_ovf', 'int.mul_ovf'):
- op0 = self._rewrite_symmetric(op0)
- oplist = [op0]
- if oopspec_name.endswith('_ovf'):
- oplist.insert(0, SpaceOperation('-live-', [], None))
- return oplist
+ opname = oopspec_name.replace('.', '_')
+ if oopspec_name in ('int.add_ovf', 'int.sub_ovf', 'int.mul_ovf'):
+ op0 = SpaceOperation(opname, args, op.result)
+ if oopspec_name in ('int.add_ovf', 'int.mul_ovf'):
+ op0 = self._rewrite_symmetric(op0)
+ oplist = [op0]
+ if oopspec_name.endswith('_ovf'):
+ oplist.insert(0, SpaceOperation('-live-', [], None))
+ return oplist
+ else:
+ os = getattr(EffectInfo, 'OS_' + opname.upper())
+ return self._handle_oopspec_call(op, args, os,
+ EffectInfo.EF_ELIDABLE_CANNOT_RAISE)
def _handle_stroruni_call(self, op, oopspec_name, args):
SoU = args[0].concretetype # Ptr(STR) or Ptr(UNICODE)
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -135,6 +135,10 @@
EI.OS_RAW_MALLOC_VARSIZE_CHAR: ([INT], ARRAYPTR),
EI.OS_RAW_FREE: ([ARRAYPTR], lltype.Void),
EI.OS_THREADLOCALREF_GET: ([INT], INT), # for example
+ EI.OS_INT_PY_DIV: ([INT, INT], INT),
+ EI.OS_INT_UDIV: ([INT, INT], INT),
+ EI.OS_INT_PY_MOD: ([INT, INT], INT),
+ EI.OS_INT_UMOD: ([INT, INT], INT),
}
argtypes = argtypes[oopspecindex]
assert argtypes[0] == [v.concretetype for v in op.args[1:]]
@@ -273,7 +277,7 @@
v3 = varoftype(lltype.Signed)
for v1 in [varoftype(lltype.Signed), const(42)]:
for v2 in [varoftype(lltype.Signed), const(43)]:
- op = SpaceOperation('foobar', [v1, v2], v3)
+ op = SpaceOperation('direct_call', [Constant(opname), v1, v2], v3)
oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.'+opname,
[v1, v2])
op1, op0 = oplist
@@ -293,7 +297,7 @@
v3 = varoftype(lltype.Signed)
for v1 in [varoftype(lltype.Signed), const(42)]:
for v2 in [varoftype(lltype.Signed), const(43)]:
- op = SpaceOperation('foobar', [v1, v2], v3)
+ op = SpaceOperation('direct_call', [Constant(opname), v1, v2], v3)
oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.'+opname,
[v1, v2])
op1, op0 = oplist
@@ -304,18 +308,19 @@
assert op1.args == []
assert op1.result is None
-@py.test.mark.parametrize('opname', ['py_div', 'py_mod'])
-def test_asymmetric_op_nonovf(opname):
+@py.test.mark.parametrize('opname', ['py_div', 'udiv', 'py_mod', 'umod'])
+def test_asymmetric_op_residual(opname):
v3 = varoftype(lltype.Signed)
+ tr = Transformer(FakeCPU(), FakeBuiltinCallControl())
for v1 in [varoftype(lltype.Signed), const(42)]:
for v2 in [varoftype(lltype.Signed), const(43)]:
- op = SpaceOperation('foobar', [v1, v2], v3)
- oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.'+opname,
- [v1, v2])
- [op0] = oplist
- assert op0.opname == 'int_'+opname
- assert op0.args == [v1, v2]
- assert op0.result == v3
+ op = SpaceOperation('direct_call', [Constant(opname), v1, v2], v3)
+ op0 = tr._handle_int_ovf(op, 'int.'+opname, [v1, v2])
+ assert op0.opname == 'residual_call_ir_i'
+ assert op0.args[0].value == opname # pseudo-function as str
+ expected = ('int_' + opname).upper()
+ assert (op0.args[-1] == 'calldescr-%d' %
+ getattr(effectinfo.EffectInfo, 'OS_' + expected))
def test_calls():
for RESTYPE, with_void, with_i, with_r, with_f in product(
diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
--- a/rpython/jit/metainterp/resoperation.py
+++ b/rpython/jit/metainterp/resoperation.py
@@ -955,11 +955,6 @@
'INT_ADD/2/i',
'INT_SUB/2/i',
'INT_MUL/2/i',
- 'INT_C_DIV/2/i', # C-style handling of negatives (backend only)
- 'INT_PY_DIV/2/i', # Python-style handling of negatives (frontend)
- 'UINT_FLOORDIV/2/i',
- 'INT_C_MOD/2/i', # C-style handling of negatives (backend only)
- 'INT_PY_MOD/2/i', # Python-style handling of negatives (frontend)
'INT_AND/2/i',
'INT_OR/2/i',
'INT_XOR/2/i',
diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py
--- a/rpython/rtyper/rint.py
+++ b/rpython/rtyper/rint.py
@@ -307,8 +307,7 @@
"""Write a simple operation implementing the given 'func'.
It must be an operation that cannot raise.
"""
- if '_ovf' in func or (func.startswith(('mod', 'floordiv'))
- and not hop.s_result.unsigned):
+ if '_ovf' in func or func.startswith(('mod', 'floordiv')):
raise TyperError("%r should not be used here any more" % (func,))
r_result = hop.r_result
@@ -351,8 +350,6 @@
if not any_implicit_exception:
if not func.startswith(('mod', 'floordiv')):
return _rtype_template(hop, func)
- if hop.s_result.unsigned:
- return _rtype_template(hop, func)
repr = hop.r_result
assert repr.lowleveltype != Bool
@@ -399,11 +396,6 @@
raise ZeroDivisionError("integer division")
return ll_int_floordiv(x, y)
-def ll_uint_floordiv_zer(x, y):
- if y == 0:
- raise ZeroDivisionError("unsigned integer division")
- return llop.uint_floordiv(Unsigned, x, y)
-
def ll_int_floordiv_ovf(x, y):
# JIT: intentionally not short-circuited to produce only one guard
# and to remove the check fully if one of the arguments is known
@@ -416,23 +408,44 @@
raise ZeroDivisionError("integer division")
return ll_int_floordiv_ovf(x, y)
-def ll_llong_floordiv(x, y):
- r = llop.llong_floordiv(SignedLongLong, x, y) # <= truncates like in C
- p = r * y
- if y < 0: u = p - x
- else: u = x - p
- return r + (u >> LLONG_BITS_1)
+@jit.oopspec("int.udiv(x, y)")
+def ll_uint_floordiv(x, y):
+ return llop.uint_floordiv(Unsigned, x, y)
-def ll_llong_floordiv_zer(x, y):
+def ll_uint_floordiv_zer(x, y):
if y == 0:
- raise ZeroDivisionError("longlong division")
- return ll_llong_floordiv(x, y)
+ raise ZeroDivisionError("unsigned integer division")
+ return ll_uint_floordiv(x, y)
-def ll_ullong_floordiv_zer(x, y):
- if y == 0:
- raise ZeroDivisionError("unsigned longlong division")
- return llop.ullong_floordiv(UnsignedLongLong, x, y)
+if SignedLongLong == Signed:
+ ll_llong_floordiv = ll_int_floordiv
+ ll_llong_floordiv_zer = ll_int_floordiv_zer
+ ll_ullong_floordiv = ll_uint_floordiv
+ ll_ullong_floordiv_zer = ll_uint_floordiv_zer
+else:
+ @jit.dont_look_inside
+ def ll_llong_floordiv(x, y):
+ r = llop.llong_floordiv(SignedLongLong, x, y) # <= truncates like in C
+ p = r * y
+ if y < 0: u = p - x
+ else: u = x - p
+ return r + (u >> LLONG_BITS_1)
+ def ll_llong_floordiv_zer(x, y):
+ if y == 0:
+ raise ZeroDivisionError("longlong division")
+ return ll_llong_floordiv(x, y)
+
+ @jit.dont_look_inside
+ def ll_ullong_floordiv(x, y):
+ return llop.ullong_floordiv(UnsignedLongLong, x, y)
+
+ def ll_ullong_floordiv_zer(x, y):
+ if y == 0:
+ raise ZeroDivisionError("unsigned longlong division")
+ return ll_ullong_floordiv(x, y)
+
+@jit.dont_look_inside
def ll_lllong_floordiv(x, y):
r = llop.lllong_floordiv(SignedLongLongLong, x, y) # <= truncates like in C
p = r * y
@@ -460,11 +473,6 @@
raise ZeroDivisionError
return ll_int_mod(x, y)
-def ll_uint_mod_zer(x, y):
- if y == 0:
- raise ZeroDivisionError
- return llop.uint_mod(Unsigned, x, y)
-
def ll_int_mod_ovf(x, y):
# see comment in ll_int_floordiv_ovf
if (x == -sys.maxint - 1) & (y == -1):
@@ -476,22 +484,43 @@
raise ZeroDivisionError
return ll_int_mod_ovf(x, y)
-def ll_llong_mod(x, y):
- r = llop.llong_mod(SignedLongLong, x, y) # <= truncates like in C
- if y < 0: u = -r
- else: u = r
- return r + (y & (u >> LLONG_BITS_1))
+@jit.oopspec("int.umod(x, y)")
+def ll_uint_mod(x, y):
+ return llop.uint_mod(Unsigned, x, y)
-def ll_llong_mod_zer(x, y):
+def ll_uint_mod_zer(x, y):
if y == 0:
raise ZeroDivisionError
- return ll_llong_mod(x, y)
+ return ll_uint_mod(x, y)
-def ll_ullong_mod_zer(x, y):
- if y == 0:
- raise ZeroDivisionError
- return llop.ullong_mod(UnsignedLongLong, x, y)
+if SignedLongLong == Signed:
+ ll_llong_mod = ll_int_mod
+ ll_llong_mod_zer = ll_int_mod_zer
+ ll_ullong_mod = ll_uint_mod
+ ll_ullong_mod_zer = ll_uint_mod_zer
+else:
+ @jit.dont_look_inside
+ def ll_llong_mod(x, y):
+ r = llop.llong_mod(SignedLongLong, x, y) # <= truncates like in C
+ if y < 0: u = -r
+ else: u = r
+ return r + (y & (u >> LLONG_BITS_1))
+ def ll_llong_mod_zer(x, y):
+ if y == 0:
+ raise ZeroDivisionError
+ return ll_llong_mod(x, y)
+
+ @jit.dont_look_inside
+ def ll_ullong_mod(x, y):
+ return llop.ullong_mod(UnsignedLongLong, x, y)
+
+ def ll_ullong_mod_zer(x, y):
+ if y == 0:
+ raise ZeroDivisionError
+ return llop.ullong_mod(UnsignedLongLong, x, y)
+
+@jit.dont_look_inside
def ll_lllong_mod(x, y):
r = llop.lllong_mod(SignedLongLongLong, x, y) # <= truncates like in C
if y < 0: u = -r
From pypy.commits at gmail.com Tue May 10 13:54:19 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 10:54:19 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: Finish 1ad01ba1173b for the
front-end
Message-ID: <5732204b.43ecc20a.69786.467a@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84352:59ed937837d3
Date: 2016-05-10 19:54 +0200
http://bitbucket.org/pypy/pypy/changeset/59ed937837d3/
Log: Finish 1ad01ba1173b for the front-end
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -1444,7 +1444,7 @@
self.mov(imm0, resloc)
self.mc.CMOVNS(resloc, arglocs[0])
- def genop_int_c_mod(self, op, arglocs, resloc):
+ def XXX_genop_int_c_mod(self, op, arglocs, resloc):
if IS_X86_32:
self.mc.CDQ()
elif IS_X86_64:
@@ -1452,9 +1452,9 @@
self.mc.IDIV_r(ecx.value)
- genop_int_c_div = genop_int_c_mod
+ XXX_genop_int_c_div = XXX_genop_int_c_mod
- def genop_uint_floordiv(self, op, arglocs, resloc):
+ def XXX_genop_uint_floordiv(self, op, arglocs, resloc):
self.mc.XOR_rr(edx.value, edx.value)
self.mc.DIV_r(ecx.value)
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -598,15 +598,15 @@
assert l2 is resultreg
self.rm.possibly_free_var(tmpvar)
- def consider_int_c_mod(self, op):
+ def XXX_consider_int_c_mod(self, op):
self._consider_int_div_or_mod(op, edx, eax)
self.perform(op, [eax, ecx], edx)
- def consider_int_c_div(self, op):
+ def XXX_consider_int_c_div(self, op):
self._consider_int_div_or_mod(op, eax, edx)
self.perform(op, [eax, ecx], eax)
- consider_uint_floordiv = consider_int_c_div
+ XXX_consider_uint_floordiv = XXX_consider_int_c_div
def _consider_compop(self, op):
vx = op.getarg(0)
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -1908,9 +1908,14 @@
op0 = SpaceOperation(opname, args, op.result)
if oopspec_name in ('int.add_ovf', 'int.mul_ovf'):
op0 = self._rewrite_symmetric(op0)
- oplist = [op0]
- if oopspec_name.endswith('_ovf'):
- oplist.insert(0, SpaceOperation('-live-', [], None))
+ oplist = [SpaceOperation('-live-', [], None), op0]
+ return oplist
+ elif oopspec_name == 'int.neg_ovf':
+ [v_x] = args
+ op0 = SpaceOperation('int_sub_ovf',
+ [Constant(0, lltype.Signed), v_x],
+ op.result)
+ oplist = [SpaceOperation('-live-', [], None), op0]
return oplist
else:
os = getattr(EffectInfo, 'OS_' + opname.upper())
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -308,6 +308,19 @@
assert op1.args == []
assert op1.result is None
+def test_neg_ovf():
+ v3 = varoftype(lltype.Signed)
+ for v1 in [varoftype(lltype.Signed), const(42)]:
+ op = SpaceOperation('direct_call', [Constant('neg_ovf'), v1], v3)
+ oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.neg_ovf', [v1])
+ op1, op0 = oplist
+ assert op0.opname == 'int_sub_ovf'
+ assert op0.args == [Constant(0), v1]
+ assert op0.result == v3
+ assert op1.opname == '-live-'
+ assert op1.args == []
+ assert op1.result is None
+
@py.test.mark.parametrize('opname', ['py_div', 'udiv', 'py_mod', 'umod'])
def test_asymmetric_op_residual(opname):
v3 = varoftype(lltype.Signed)
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -430,19 +430,6 @@
return 0, label
@arguments("i", "i", returns="i")
- def bhimpl_int_py_div(a, b):
- return a // b
-
- @arguments("i", "i", returns="i")
- def bhimpl_uint_floordiv(a, b):
- c = llop.uint_floordiv(lltype.Unsigned, r_uint(a), r_uint(b))
- return intmask(c)
-
- @arguments("i", "i", returns="i")
- def bhimpl_int_py_mod(a, b):
- return a % b
-
- @arguments("i", "i", returns="i")
def bhimpl_int_and(a, b):
return a & b
diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py
--- a/rpython/jit/metainterp/executor.py
+++ b/rpython/jit/metainterp/executor.py
@@ -409,8 +409,6 @@
rop.GC_STORE,
rop.GC_STORE_INDEXED,
rop.LOAD_FROM_GC_TABLE,
- rop.INT_C_DIV,
- rop.INT_C_MOD,
): # list of opcodes never executed by pyjitpl
continue
if rop._VEC_PURE_FIRST <= value <= rop._VEC_PURE_LAST:
diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py
--- a/rpython/jit/metainterp/optimizeopt/dependency.py
+++ b/rpython/jit/metainterp/optimizeopt/dependency.py
@@ -929,10 +929,10 @@
"""
exec py.code.Source(multiplicative_func_source
.format(name='INT_MUL', op='*', tgt='mul', cop='*')).compile()
- exec py.code.Source(multiplicative_func_source
- .format(name='INT_PY_DIV', op='*', tgt='div', cop='/')).compile()
- exec py.code.Source(multiplicative_func_source
- .format(name='UINT_FLOORDIV', op='*', tgt='div', cop='/')).compile()
+ #exec py.code.Source(multiplicative_func_source
+ # .format(name='INT_PY_DIV', op='*', tgt='div', cop='/')).compile()
+ #exec py.code.Source(multiplicative_func_source
+ # .format(name='UINT_FLOORDIV', op='*', tgt='div', cop='/')).compile()
del multiplicative_func_source
array_access_source = """
@@ -1042,9 +1042,11 @@
var = ResOperation(rop.INT_MUL, args)
opt.emit_operation(var)
if self.coefficient_div != 1:
- args = [var, ConstInt(self.coefficient_div)]
- var = ResOperation(rop.INT_FLOORDIV, args)
- opt.emit_operation(var)
+ assert 0 # XXX for now; should never be the case with handling
+ # of INT_PY_DIV commented out in this file...
+ #args = [var, ConstInt(self.coefficient_div)]
+ #var = ResOperation(rop.INT_FLOORDIV, args)
+ #opt.emit_operation(var)
if self.constant > 0:
args = [var, ConstInt(self.constant)]
var = ResOperation(rop.INT_ADD, args)
diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py
--- a/rpython/jit/metainterp/optimizeopt/intbounds.py
+++ b/rpython/jit/metainterp/optimizeopt/intbounds.py
@@ -172,14 +172,14 @@
if b.bounded():
r.intersect(b)
- def optimize_INT_PY_DIV(self, op):
+ def XXX_optimize_INT_PY_DIV(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
self.emit_operation(op)
r = self.getintbound(op)
r.intersect(b1.py_div_bound(b2))
- def optimize_INT_PY_MOD(self, op):
+ def XXX_optimize_INT_PY_MOD(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
if b2.is_constant():
diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py
--- a/rpython/jit/metainterp/optimizeopt/rewrite.py
+++ b/rpython/jit/metainterp/optimizeopt/rewrite.py
@@ -168,7 +168,7 @@
break
self.emit_operation(op)
- def optimize_UINT_FLOORDIV(self, op):
+ def XXX_optimize_UINT_FLOORDIV(self, op):
b2 = self.getintbound(op.getarg(1))
if b2.is_constant() and b2.getint() == 1:
@@ -678,7 +678,7 @@
def optimize_GUARD_FUTURE_CONDITION(self, op):
self.optimizer.notice_guard_future_condition(op)
- def optimize_INT_PY_DIV(self, op):
+ def XXX_optimize_INT_PY_DIV(self, op):
arg0 = op.getarg(0)
b1 = self.getintbound(arg0)
arg1 = op.getarg(1)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -1856,6 +1856,7 @@
[i0]
jump(0)
"""
+ py.test.skip("XXX re-enable")
self.optimize_loop(ops, expected)
def test_fold_partially_constant_ops_ovf(self):
@@ -4643,6 +4644,7 @@
self.optimize_strunicode_loop(ops, expected)
def test_intmod_bounds(self):
+ py.test.skip("XXX re-enable")
ops = """
[i0, i1]
i2 = int_py_mod(i0, 12)
@@ -4699,6 +4701,7 @@
self.optimize_loop(ops, expected)
def test_intmod_bounds_bug1(self):
+ py.test.skip("XXX re-enable")
ops = """
[i0]
i1 = int_py_mod(i0, %d)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -3491,6 +3491,7 @@
self.optimize_loop(ops, expected)
def test_fold_partially_constant_uint_floordiv(self):
+ py.test.skip("XXX re-enable")
ops = """
[i0]
i1 = uint_floordiv(i0, 1)
@@ -5241,6 +5242,7 @@
self.optimize_loop(ops, expected, preamble)
def test_bound_floordiv(self):
+ py.test.skip("XXX re-enable")
ops = """
[i0, i1, i2]
it1 = int_ge(i1, 0)
@@ -5315,6 +5317,7 @@
self.optimize_loop(ops, expected, preamble)
def test_division(self):
+ py.test.skip("XXX re-enable")
ops = """
[i7, i6, i8]
it1 = int_gt(i7, 0)
@@ -5366,6 +5369,7 @@
self.optimize_loop(ops, expected, preamble)
def test_division_to_rshift(self):
+ py.test.skip("XXX re-enable")
ops = """
[i1, i2]
it = int_gt(i1, 0)
@@ -5473,6 +5477,7 @@
self.optimize_loop(ops, expected)
def test_int_div_1(self):
+ py.test.skip("XXX re-enable")
ops = """
[i0]
i1 = int_floordiv(i0, 1)
@@ -5485,6 +5490,7 @@
self.optimize_loop(ops, expected)
def test_division_nonneg(self):
+ py.test.skip("XXX re-enable")
py.test.skip("harder")
# this is how an app-level division turns into right now
ops = """
@@ -5508,6 +5514,7 @@
self.optimize_loop(ops, expected)
def test_division_by_2(self):
+ py.test.skip("XXX re-enable")
py.test.skip("harder")
ops = """
[i4]
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_schedule.py b/rpython/jit/metainterp/optimizeopt/test/test_schedule.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_schedule.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_schedule.py
@@ -394,6 +394,7 @@
self.assert_equal(loop2, loop3)
def test_no_vec_impl(self):
+ py.test.skip("XXX re-enable")
loop1 = self.parse_trace("""
i10 = int_and(255, i1)
i11 = int_and(255, i2)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
@@ -659,6 +659,7 @@
assert mref1.is_adjacent_after(mref5)
def test_array_memory_ref_div(self):
+ py.test.skip("XXX re-enable")
ops = """
[p0,i0]
i1 = int_floordiv(i0,2)
@@ -721,6 +722,7 @@
assert mref == mref2
def test_array_memory_ref_diff_not_equal(self):
+ py.test.skip("XXX re-enable")
ops = """
[p0,i0]
i1 = int_add(i0,4)
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -201,11 +201,10 @@
# ------------------------------
- for _opimpl in ['int_add', 'int_sub', 'int_mul', 'int_py_div', 'int_py_mod',
+ for _opimpl in ['int_add', 'int_sub', 'int_mul',
'int_and', 'int_or', 'int_xor', 'int_signext',
'int_rshift', 'int_lshift', 'uint_rshift',
'uint_lt', 'uint_le', 'uint_gt', 'uint_ge',
- 'uint_floordiv',
'float_add', 'float_sub', 'float_mul', 'float_truediv',
'float_lt', 'float_le', 'float_eq',
'float_ne', 'float_gt', 'float_ge',
diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py
--- a/rpython/jit/metainterp/test/test_ajit.py
+++ b/rpython/jit/metainterp/test/test_ajit.py
@@ -586,7 +586,7 @@
def internfn(y):
return y * 3
def externfn(y):
- return y % 4
+ return y ^ 4
def f(y):
while y >= 0:
myjitdriver.can_enter_jit(y=y)
@@ -601,7 +601,7 @@
policy = StopAtXPolicy(externfn)
res = self.meta_interp(f, [31], policy=policy)
assert res == 42
- self.check_resops(int_mul=2, int_py_mod=0, int_c_mod=0)
+ self.check_resops(int_mul=2, int_xor=0)
def test_we_are_jitted(self):
myjitdriver = JitDriver(greens = [], reds = ['y'])
@@ -939,6 +939,7 @@
return n
res = self.meta_interp(f, [20, 1, 2])
assert res == 0
+ py.test.skip("XXX re-enable")
self.check_resops(call_i=0, call_r=0)
def test_abs(self):
@@ -1133,7 +1134,7 @@
while n > 0:
mydriver.can_enter_jit(n=n, x=x)
mydriver.jit_merge_point(n=n, x=x)
- if n % 2 == 0:
+ if n & 1 == 0:
cls = A
else:
cls = B
diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py
--- a/rpython/jit/metainterp/test/test_dict.py
+++ b/rpython/jit/metainterp/test/test_dict.py
@@ -99,9 +99,9 @@
py.test.skip("this is an r_dict test")
myjitdriver = JitDriver(greens = [], reds = ['total', 'dct'])
def key(x):
- return x % 2
+ return x & 1
def eq(x, y):
- return (x % 2) == (y % 2)
+ return (x & 1) == (y & 1)
def f(n):
dct = objectmodel.r_dict(eq, key)
@@ -117,7 +117,7 @@
res1 = f(100)
res2 = self.meta_interp(f, [100], listops=True)
assert res1 == res2
- self.check_resops(int_py_mod=2) # the hash was traced and eq, but cached
+ self.check_resops(int_and=2) # the hash was traced and eq, but cached
def test_dict_setdefault(self):
myjitdriver = JitDriver(greens = [], reds = ['total', 'dct'])
@@ -140,9 +140,9 @@
py.test.skip("this is an r_dict test")
myjitdriver = JitDriver(greens = [], reds = ['total', 'dct'])
def key(x):
- return x % 2
+ return x & 1
def eq(x, y):
- return (x % 2) == (y % 2)
+ return (x & 1) == (y & 1)
def f(n):
dct = objectmodel.r_dict(eq, key)
@@ -156,7 +156,7 @@
assert f(100) == 50
res = self.meta_interp(f, [100], listops=True)
assert res == 50
- self.check_resops(int_py_mod=2) # key + eq, but cached
+ self.check_resops(int_and=2) # key + eq, but cached
def test_repeated_lookup(self):
if type(self.newdict()) is not dict:
@@ -370,7 +370,7 @@
d = {}
while n > 0:
myjitdriver.jit_merge_point()
- if n % 10 == 0:
+ if n & 7 == 0:
n -= len(d)
d = {}
d["a"] = n
diff --git a/rpython/jit/metainterp/test/test_executor.py b/rpython/jit/metainterp/test/test_executor.py
--- a/rpython/jit/metainterp/test/test_executor.py
+++ b/rpython/jit/metainterp/test/test_executor.py
@@ -142,18 +142,18 @@
(133, 133, 0)]),
(rop.INT_MUL, [(-6, -3, 18),
(15, 15, 225)]),
- (rop.INT_FLOORDIV, [(110, 3, 36),
- (-110, 3, -36),
- (110, -3, -36),
- (-110, -3, 36),
- (-110, -1, 110),
- (minint, 1, minint),
- (-87, -87, 1)]),
- (rop.INT_MOD, [(11, 3, 2),
- (-11, 3, -2),
- (11, -3, 2),
- (-11, -3, -2),
- (-87, -87, 0)]),
+ ## (rop.INT_FLOORDIV, [(110, 3, 36),
+ ## (-110, 3, -36),
+ ## (110, -3, -36),
+ ## (-110, -3, 36),
+ ## (-110, -1, 110),
+ ## (minint, 1, minint),
+ ## (-87, -87, 1)]),
+ ## (rop.INT_MOD, [(11, 3, 2),
+ ## (-11, 3, -2),
+ ## (11, -3, 2),
+ ## (-11, -3, -2),
+ ## (-87, -87, 0)]),
(rop.INT_AND, [(0xFF00, 0x0FF0, 0x0F00),
(-111, -111, -111)]),
(rop.INT_OR, [(0xFF00, 0x0FF0, 0xFFF0),
@@ -170,15 +170,15 @@
(rop.UINT_RSHIFT, [(-1, 4, intmask(r_uint(-1) >> r_uint(4))),
( 1, 4, intmask(r_uint(1) >> r_uint(4))),
( 3, 3, 0)]),
- (rop.UINT_FLOORDIV, [(4, 3, intmask(r_uint(4) / r_uint(3))),
- (1, -1, intmask(r_uint(1) / r_uint(-1))),
- (110, 3, 36),
- (-110, 3, intmask(r_uint(-110) / r_uint(3))),
- (110, -3, intmask(r_uint(110) / r_uint(-3))),
- (-110, -3, intmask(r_uint(-110) / r_uint(-3))),
- (-110, -1, intmask(r_uint(-110) / r_uint(-1))),
- (minint, 1, intmask(r_uint(minint) / r_uint(1))),
- (-87, -87, intmask(r_uint(-87) / r_uint(-87)))])
+ ## (rop.UINT_FLOORDIV, [(4, 3, intmask(r_uint(4) / r_uint(3))),
+ ## (1, -1, intmask(r_uint(1) / r_uint(-1))),
+ ## (110, 3, 36),
+ ## (-110, 3, intmask(r_uint(-110) / r_uint(3))),
+ ## (110, -3, intmask(r_uint(110) / r_uint(-3))),
+ ## (-110, -3, intmask(r_uint(-110) / r_uint(-3))),
+ ## (-110, -1, intmask(r_uint(-110) / r_uint(-1))),
+ ## (minint, 1, intmask(r_uint(minint) / r_uint(1))),
+ ## (-87, -87, intmask(r_uint(-87) / r_uint(-87)))])
]:
for x, y, z in testcases:
yield opnum, [x, y], z
diff --git a/rpython/jit/metainterp/test/test_list.py b/rpython/jit/metainterp/test/test_list.py
--- a/rpython/jit/metainterp/test/test_list.py
+++ b/rpython/jit/metainterp/test/test_list.py
@@ -212,7 +212,7 @@
s += lst[0]
lst.pop()
lst.append(1)
- s /= lst.pop()
+ s *= lst.pop()
return s
res = self.meta_interp(f, [15], listops=True)
assert res == f(15)
diff --git a/rpython/jit/metainterp/test/test_string.py b/rpython/jit/metainterp/test/test_string.py
--- a/rpython/jit/metainterp/test/test_string.py
+++ b/rpython/jit/metainterp/test/test_string.py
@@ -511,7 +511,7 @@
def f(n):
while n < 21:
driver.jit_merge_point(n=n)
- promote_string(str(n % 3))
+ promote_string(str(n & 3))
n += 1
return 0
diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py
--- a/rpython/rtyper/rint.py
+++ b/rpython/rtyper/rint.py
@@ -594,9 +594,8 @@
raise OverflowError("x<
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84353:d55201e3d06c
Date: 2016-05-10 20:57 +0200
http://bitbucket.org/pypy/pypy/changeset/d55201e3d06c/
Log: Backend
diff --git a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py
--- a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py
+++ b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py
@@ -497,6 +497,7 @@
assert s[1] == 'a'
def test_division_optimized(self):
+ py.test.skip("XXX re-enable")
ops = '''
[i7, i6]
label(i7, i6, descr=targettoken)
diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py
--- a/rpython/jit/backend/test/test_random.py
+++ b/rpython/jit/backend/test/test_random.py
@@ -548,8 +548,8 @@
]:
OPERATIONS.append(BinaryOperation(_op, boolres=True))
-OPERATIONS.append(BinaryOperation(rop.INT_FLOORDIV, ~3, 2))
-OPERATIONS.append(BinaryOperation(rop.INT_MOD, ~3, 2))
+#OPERATIONS.append(BinaryOperation(rop.INT_FLOORDIV, ~3, 2))
+#OPERATIONS.append(BinaryOperation(rop.INT_MOD, ~3, 2))
OPERATIONS.append(BinaryOperation(rop.INT_RSHIFT, LONG_BIT-1))
OPERATIONS.append(BinaryOperation(rop.INT_LSHIFT, LONG_BIT-1))
OPERATIONS.append(BinaryOperation(rop.UINT_RSHIFT, LONG_BIT-1))
From pypy.commits at gmail.com Tue May 10 15:32:44 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 12:32:44 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: Temporarily(?) mask these two
llops from the JIT
Message-ID: <5732375c.0c2e1c0a.bc1ee.ffff89fa@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84354:4419d4631487
Date: 2016-05-10 21:33 +0200
http://bitbucket.org/pypy/pypy/changeset/4419d4631487/
Log: Temporarily(?) mask these two llops from the JIT
diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py
--- a/pypy/module/__pypy__/interp_intop.py
+++ b/pypy/module/__pypy__/interp_intop.py
@@ -2,6 +2,19 @@
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.rarithmetic import r_uint, intmask
+from rpython.rlib import jit
+
+
+# XXX maybe temporary: hide llop.int_{floordiv,mod} from the JIT,
+# because now it expects only Python-style divisions, not the
+# C-style divisions of these two ll operations
+@jit.dont_look_inside
+def _int_floordiv(n, m):
+ return llop.int_floordiv(lltype.Signed, n, m)
+
+@jit.dont_look_inside
+def _int_mod(n, m):
+ return llop.int_mod(lltype.Signed, n, m)
@unwrap_spec(n=int, m=int)
@@ -18,11 +31,11 @@
@unwrap_spec(n=int, m=int)
def int_floordiv(space, n, m):
- return space.wrap(llop.int_floordiv(lltype.Signed, n, m))
+ return space.wrap(_int_floordiv(n, m))
@unwrap_spec(n=int, m=int)
def int_mod(space, n, m):
- return space.wrap(llop.int_mod(lltype.Signed, n, m))
+ return space.wrap(_int_mod(n, m))
@unwrap_spec(n=int, m=int)
def int_lshift(space, n, m):
From pypy.commits at gmail.com Tue May 10 17:29:33 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 10 May 2016 14:29:33 -0700 (PDT)
Subject: [pypy-commit] pypy default: Add comment
Message-ID: <573252bd.c61ec20a.7e397.ffff9893@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84355:005256ca5fa9
Date: 2016-05-10 23:29 +0200
http://bitbucket.org/pypy/pypy/changeset/005256ca5fa9/
Log: Add comment
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -466,6 +466,13 @@
list = self.fired_actions
if list is not None:
self.fired_actions = None
+ # NB. in case there are several actions, we reset each
+ # 'action._fired' to false only when we're about to call
+ # 'action.perform()'. This means that if
+ # 'action.fire()' happens to be called any time before
+ # the corresponding perform(), the fire() has no
+ # effect---which is the effect we want, because
+ # perform() will be called anyway.
for action in list:
action._fired = False
action.perform(ec, frame)
From pypy.commits at gmail.com Wed May 11 03:15:50 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 00:15:50 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-ext: merge default into branch
Message-ID: <5732dc26.22acc20a.4fe73.2c14@mx.google.com>
Author: Matti Picus
Branch: cpyext-ext
Changeset: r84356:f5d55063ed2d
Date: 2016-05-09 21:00 +0300
http://bitbucket.org/pypy/pypy/changeset/f5d55063ed2d/
Log: merge default into branch
diff too long, truncating to 2000 out of 24254 lines
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
if __name__ == '__main__':
if len(sys.argv) != 2:
+ if len(sys.argv) == 1:
+ # start locally
+ import sshgraphserver
+ sshgraphserver.ssh_graph_server(['LOCAL'])
+ sys.exit(0)
print >> sys.stderr, __doc__
sys.exit(2)
if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
Usage:
sshgraphserver.py hostname [more args for ssh...]
+ sshgraphserver.py LOCAL
This logs in to 'hostname' by passing the arguments on the command-line
to ssh. No further configuration is required: it works for all programs
using the dotviewer library as long as they run on 'hostname' under the
same username as the one sshgraphserver logs as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
"""
import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
s1 = socket.socket()
s1.bind(('127.0.0.1', socket.INADDR_ANY))
localhost, localport = s1.getsockname()
- remoteport = random.randrange(10000, 20000)
- # ^^^ and just hope there is no conflict
- args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
- args = args + sshargs + ['python -u -c "exec input()"']
- print ' '.join(args[:-1])
+ if sshargs[0] != 'LOCAL':
+ remoteport = random.randrange(10000, 20000)
+ # ^^^ and just hope there is no conflict
+
+ args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+ remoteport, localport)]
+ args = args + sshargs + ['python -u -c "exec input()"']
+ else:
+ remoteport = localport
+ args = ['python', '-u', '-c', 'exec input()']
+
+ print ' '.join(args)
p = subprocess.Popen(args, bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "next" : stop}),
- ("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
("__format__", format, format_impl, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
+ if test_support.check_impl_detail():
+ specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
raise MyException
for name, runner, meth_impl, ok, env in specials:
- if name == '__length_hint__' or name == '__sizeof__':
- if not test_support.check_impl_detail():
- continue
-
class X(Checker):
pass
for attr, obj in env.iteritems():
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -397,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
@@ -993,6 +998,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,127 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``. To
+make it possible, the RPython interface is now the following one (from
+May 2016):
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``. These are called
+ immediately by the GC when the last reference to the object goes
+ away, like in CPython. However, the long-term goal is that all
+ ``__del__()`` methods should only contain simple enough code. If
+ they do, we call them "destructors". They can't use operations that
+ would resurrect the object, for example. Use the decorator
+ ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` that are not passing the destructor test
+ are supported for backward compatibility, but deprecated. The rest
+ of this document assumes that ``__del__()`` are all destructors.
+
+* For any more advanced usage --- in particular for any app-level
+ object with a __del__ --- we don't use the RPython-level
+ ``__del__()`` method. Instead we use
+ ``rgc.FinalizerController.register_finalizer()``. This allows us to
+ attach a finalizer method to the object, giving more control over
+ the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer. A finalizer runs earlier, and in topological
+order; care must be taken that the object might still be reachable at
+this point if we're clever enough. A destructor on the other hand runs
+last; nothing can be done with the object any more, and the GC frees it
+immediately.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when it is about to free the memory. Intended for objects
+that just need to free an extra block of raw memory.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it. These restrictions are
+checked. In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
+
+Destructors are called precisely when the GC frees the memory of the
+object. As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerQueue`` class::
+
+* You must give a class-level attribute ``base_class``, which is the
+ base class of all instances with a finalizer. (If you need
+ finalizers on several unrelated classes, you need several unrelated
+ ``FinalizerQueue`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``. At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer. Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and mark them as
+reachable again, as well as all objects they depend on. It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance. Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes). If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly. It
+returns the next queued item, or ``None`` when the queue is empty.
+
+In theory, it would kind of work if you cumulate several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues. This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
+
+Note that the Boehm garbage collector, used in ``rpython -O0``,
+completely ignores ``register_finalizer()``.
+
+
+Ordering of finalizers
+----------------------
+
+After a collection, the MiniMark GC should call the finalizers on
*some* of the objects that have one and that have become unreachable.
Basically, if there is a reference chain from an object a to an object b
then it should not call the finalizer for b immediately, but just keep b
alive and try again to call its finalizer after the next collection.
-This basic idea fails when there are cycles. It's not a good idea to
+(Note that this creates rare but annoying issues as soon as the program
+creates chains of objects with finalizers more quickly than the rate at
+which major collections go (which is very slow). In August 2013 we tried
+instead to call all finalizers of all objects found unreachable at a major
+collection. That branch, ``gc-del``, was never merged. It is still
+unclear what the real consequences would be on programs in the wild.)
+
+The basic idea fails in the presence of cycles. It's not a good idea to
keep the objects alive forever or to never call any of the finalizers.
The model we came up with is that in this case, we could just call the
finalizer of one of the objects in the cycle -- but only, of course, if
@@ -33,6 +141,7 @@
detach the finalizer (so that it's not called more than once)
call the finalizer
+
Algorithm
---------
@@ -136,28 +245,8 @@
that doesn't change the state of an object, we don't follow its children
recursively.
-In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode
-the 4 states with a single extra bit in the header:
-
- ===== ============= ======== ====================
- state is_forwarded? bit set? bit set in the copy?
- ===== ============= ======== ====================
- 0 no no n/a
- 1 no yes n/a
- 2 yes yes yes
- 3 yes whatever no
- ===== ============= ======== ====================
-
-So the loop above that does the transition from state 1 to state 2 is
-really just a copy(x) followed by scan_copied(). We must also clear the
-bit in the copy at the end, to clean up before the next collection
-(which means recursively bumping the state from 2 to 3 in the final
-loop).
-
-In the MiniMark GC, the objects don't move (apart from when they are
-copied out of the nursery), but we use the flag GCFLAG_VISITED to mark
-objects that survive, so we can also have a single extra bit for
-finalizers:
+In practice, in the MiniMark GCs, we can encode
+the 4 states with a combination of two bits in the header:
===== ============== ============================
state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING
@@ -167,3 +256,8 @@
2 yes yes
3 yes no
===== ============== ============================
+
+So the loop above that does the transition from state 1 to state 2 is
+really just a recursive visit. We must also clear the
+FINALIZATION_ORDERING bit at the end (state 2 to state 3) to clean up
+before the next collection.
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.1.1.rst
release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
help="output format")
options, args = parser.parse_args()
if len(args) != 1:
- raise ValueError, "need exactly one argument"
+ raise ValueError("need exactly one argument")
epsfile = process_dot(py.path.local(args[0]))
if options.format == "ps" or options.format == "eps":
print epsfile.read()
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -61,3 +61,31 @@
calls PyXxx", we now silently acquire/release the GIL. Helps with
CPython C extension modules that call some PyXxx() functions without
holding the GIL (arguably, they are theoretically buggy).
+
+.. branch: cpyext-test-A
+
+Get the cpyext tests to pass with "-A" (i.e. when tested directly with
+CPython).
+
+.. branch: oefmt
+
+.. branch: cpyext-werror
+
+Compile c snippets with -Werror in cpyext
+
+.. branch: gc-del-3
+
+Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-order.rst.
+It is a more flexible way to make RPython finalizers.
+
+.. branch: unpacking-cpython-shortcut
+
+.. branch: cleanups
+
+.. branch: cpyext-more-slots
+
+.. branch: use-gc-del-3
+
+Use the new rgc.FinalizerQueue mechanism to clean up the handling of
+``__del__`` methods. Fixes notably issue #2287. (All RPython
+subclasses of W_Root need to use FinalizerQueue now.)
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -63,7 +63,7 @@
## from pypy.interpreter import main, interactive, error
## con = interactive.PyPyConsole(space)
## con.interact()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -71,7 +71,7 @@
finally:
try:
space.finish()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -115,7 +115,7 @@
space.wrap('__import__'))
space.call_function(import_, space.wrap('site'))
return rffi.cast(rffi.INT, 0)
- except OperationError, e:
+ except OperationError as e:
if verbose:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
@@ -167,7 +167,7 @@
sys._pypy_execute_source.append(glob)
exec stmt in glob
""")
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -84,7 +84,7 @@
space = self.space
try:
args_w = space.fixedview(w_stararg)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"argument after * must be a sequence, not %T",
@@ -111,7 +111,7 @@
else:
try:
w_keys = space.call_method(w_starstararg, "keys")
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
raise oefmt(space.w_TypeError,
"argument after ** must be a mapping, not %T",
@@ -134,11 +134,11 @@
"""The simplest argument parsing: get the 'argcount' arguments,
or raise a real ValueError if the length is wrong."""
if self.keywords:
- raise ValueError, "no keyword arguments expected"
+ raise ValueError("no keyword arguments expected")
if len(self.arguments_w) > argcount:
- raise ValueError, "too many arguments (%d expected)" % argcount
+ raise ValueError("too many arguments (%d expected)" % argcount)
elif len(self.arguments_w) < argcount:
- raise ValueError, "not enough arguments (%d expected)" % argcount
+ raise ValueError("not enough arguments (%d expected)" % argcount)
return self.arguments_w
def firstarg(self):
@@ -279,7 +279,7 @@
try:
self._match_signature(w_firstarg,
scope_w, signature, defaults_w, 0)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
return signature.scope_length()
@@ -301,7 +301,7 @@
"""
try:
return self._parse(w_firstarg, signature, defaults_w, blindargs)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
@staticmethod
@@ -352,11 +352,9 @@
for w_key in keys_w:
try:
key = space.str_w(w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("keywords must be strings"))
+ raise oefmt(space.w_TypeError, "keywords must be strings")
if e.match(space, space.w_UnicodeEncodeError):
# Allow this to pass through
key = None
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -16,8 +16,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -115,16 +115,16 @@
def check_forbidden_name(self, name, node):
try:
misc.check_forbidden_name(name)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error("cannot assign to %s" % (e.name,), node)
def set_context(self, expr, ctx):
"""Set the context of an expression to Store or Del if possible."""
try:
expr.set_context(ctx)
- except ast.UnacceptableExpressionContext, e:
+ except ast.UnacceptableExpressionContext as e:
self.error_ast(e.msg, e.node)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_print_stmt(self, print_node):
@@ -1080,7 +1080,7 @@
return self.space.call_function(tp, w_num_str)
try:
return self.space.call_function(self.space.w_int, w_num_str, w_base)
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(self.space, self.space.w_ValueError):
raise
return self.space.call_function(self.space.w_float, w_num_str)
@@ -1100,7 +1100,7 @@
sub_strings_w = [parsestring.parsestr(space, encoding, atom_node.get_child(i).get_value(),
unicode_literals)
for i in range(atom_node.num_children())]
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(space, space.w_UnicodeError):
raise
# UnicodeError in literal: turn into SyntaxError
diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py
--- a/pypy/interpreter/astcompiler/symtable.py
+++ b/pypy/interpreter/astcompiler/symtable.py
@@ -325,7 +325,7 @@
try:
module.walkabout(self)
top.finalize(None, {}, {})
- except SyntaxError, e:
+ except SyntaxError as e:
e.filename = compile_info.filename
raise
self.pop_scope()
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -705,7 +705,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
@@ -717,7 +717,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
@@ -969,7 +969,7 @@
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
- except AssertionError, e:
+ except AssertionError as e:
assert str(e) == "(3,)"
# BUILD_LIST_FROM_ARG is PyPy specific
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -96,7 +96,7 @@
def t_default(self, s):
r" . +"
- raise ValueError, "unmatched input: %s" % `s`
+ raise ValueError("unmatched input: %s" % `s`)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
@@ -377,7 +377,7 @@
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
- except ASDLSyntaxError, err:
+ except ASDLSyntaxError as err:
print err
lines = buf.split("\n")
print lines[err.lineno - 1] # lines starts at 0, files at 1
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -399,8 +399,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -11,7 +11,7 @@
INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
- UserDelAction)
+ make_finalizer_queue)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
@@ -28,6 +28,7 @@
"""This is the abstract root class of all wrapped objects that live
in a 'normal' object space like StdObjSpace."""
__slots__ = ('__weakref__',)
+ _must_be_light_finalizer_ = True
user_overridden_class = False
def getdict(self, space):
@@ -52,7 +53,7 @@
try:
space.delitem(w_dict, space.wrap(attr))
return True
- except OperationError, ex:
+ except OperationError as ex:
if not ex.match(space, space.w_KeyError):
raise
return False
@@ -67,8 +68,8 @@
return space.gettypeobject(self.typedef)
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("__class__ assignment: only for heap types"))
+ raise oefmt(space.w_TypeError,
+ "__class__ assignment: only for heap types")
def user_setup(self, space, w_subtype):
raise NotImplementedError("only for interp-level user subclasses "
@@ -77,7 +78,7 @@
def getname(self, space):
try:
return space.str_w(space.getattr(self, space.wrap('__name__')))
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError):
return '?'
raise
@@ -136,9 +137,8 @@
pass
def clear_all_weakrefs(self):
- """Call this at the beginning of interp-level __del__() methods
- in subclasses. It ensures that weakrefs (if any) are cleared
- before the object is further destroyed.
+ """Ensures that weakrefs (if any) are cleared now. This is
+ called by UserDelAction before the object is finalized further.
"""
lifeline = self.getweakref()
if lifeline is not None:
@@ -151,25 +151,37 @@
self.delweakref()
lifeline.clear_all_weakrefs()
- __already_enqueued_for_destruction = ()
+ def _finalize_(self):
+ """The RPython-level finalizer.
- def enqueue_for_destruction(self, space, callback, descrname):
- """Put the object in the destructor queue of the space.
- At a later, safe point in time, UserDelAction will call
- callback(self). If that raises OperationError, prints it
- to stderr with the descrname string.
+ By default, it is *not called*. See self.register_finalizer().
+ Be ready to handle the case where the object is only half
+ initialized. Also, in some cases the object might still be
+ visible to app-level after _finalize_() is called (e.g. if
+ there is a __del__ that resurrects).
+ """
- Note that 'callback' will usually need to start with:
- assert isinstance(self, W_SpecificClass)
+ def register_finalizer(self, space):
+ """Register a finalizer for this object, so that
+ self._finalize_() will be called. You must call this method at
+ most once. Be ready to handle in _finalize_() the case where
+ the object is half-initialized, even if you only call
+ self.register_finalizer() at the end of the initialization.
+ This is because there are cases where the finalizer is already
+ registered before: if the user makes an app-level subclass with
+ a __del__. (In that case only, self.register_finalizer() does
+ nothing, because the finalizer is already registered in
+ allocate_instance().)
"""
- # this function always resurect the object, so when
- # running on top of CPython we must manually ensure that
- # we enqueue it only once
- if not we_are_translated():
- if callback in self.__already_enqueued_for_destruction:
- return
- self.__already_enqueued_for_destruction += (callback,)
- space.user_del_action.register_callback(self, callback, descrname)
+ if self.user_overridden_class and self.getclass(space).hasuserdel:
+ # already registered by space.allocate_instance()
+ if not we_are_translated():
+ assert space.finalizer_queue._already_registered(self)
+ else:
+ if not we_are_translated():
+ # does not make sense if _finalize_ is not overridden
+ assert self._finalize_.im_func is not W_Root._finalize_.im_func
+ space.finalizer_queue.register_finalizer(self)
# hooks that the mapdict implementations needs:
def _get_mapdict_map(self):
@@ -318,7 +330,7 @@
space = self.space
try:
return space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise StopIteration
@@ -389,9 +401,9 @@
self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
- self.user_del_action = UserDelAction(self)
+ make_finalizer_queue(W_Root, self)
self._code_of_sys_exc_info = None
-
+
# can be overridden to a subclass
self.initialize()
@@ -406,7 +418,7 @@
self.sys.get('builtin_module_names')):
try:
w_mod = self.getitem(w_modules, w_modname)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
continue
raise
@@ -440,7 +452,7 @@
try:
self.call_method(w_mod, "_shutdown")
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self, "threading._shutdown()")
def __repr__(self):
@@ -476,7 +488,7 @@
assert reuse
try:
return self.getitem(w_modules, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_KeyError):
raise
@@ -706,8 +718,7 @@
try:
return rthread.allocate_lock()
except rthread.error:
- raise OperationError(self.w_RuntimeError,
- self.wrap("out of resources"))
+ raise oefmt(self.w_RuntimeError, "out of resources")
# Following is a friendly interface to common object space operations
# that can be defined in term of more primitive ones. Subclasses
@@ -764,7 +775,7 @@
def finditem(self, w_obj, w_key):
try:
return self.getitem(w_obj, w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
return None
raise
@@ -772,7 +783,7 @@
def findattr(self, w_object, w_name):
try:
return self.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
# a PyPy extension: let SystemExit and KeyboardInterrupt go through
if e.async(self):
raise
@@ -872,7 +883,7 @@
items=items)
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -896,13 +907,12 @@
while True:
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
if idx == expected_length:
- raise OperationError(self.w_ValueError,
- self.wrap("too many values to unpack"))
+ raise oefmt(self.w_ValueError, "too many values to unpack")
items[idx] = w_item
idx += 1
if idx < expected_length:
@@ -942,7 +952,7 @@
"""
try:
return self.len_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -952,7 +962,7 @@
return default
try:
w_hint = self.get_and_call_function(w_descr, w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -962,8 +972,8 @@
hint = self.int_w(w_hint)
if hint < 0:
- raise OperationError(self.w_ValueError, self.wrap(
- "__length_hint__() should return >= 0"))
+ raise oefmt(self.w_ValueError,
+ "__length_hint__() should return >= 0")
return hint
def fixedview(self, w_iterable, expected_length=-1):
@@ -1049,7 +1059,7 @@
else:
return False
return self.exception_issubclass_w(w_exc_type, w_check_class)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_TypeError): # string exceptions maybe
return False
raise
@@ -1167,7 +1177,7 @@
try:
self.getattr(w_obj, self.wrap("__call__"))
return self.w_True
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_AttributeError):
raise
return self.w_False
@@ -1287,7 +1297,7 @@
def _next_or_none(self, w_it):
try:
return self.next(w_it)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
return None
@@ -1330,8 +1340,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
return start, stop, step
@@ -1351,8 +1360,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
length = 1
@@ -1365,7 +1373,7 @@
"""
try:
w_index = self.index(w_obj)
- except OperationError, err:
+ except OperationError as err:
if objdescr is None or not err.match(self, self.w_TypeError):
raise
raise oefmt(self.w_TypeError, "%s must be an integer, not %T",
@@ -1375,7 +1383,7 @@
# return type of __index__ is already checked by space.index(),
# but there is no reason to allow conversions anyway
index = self.int_w(w_index, allow_conversion=False)
- except OperationError, err:
+ except OperationError as err:
if not err.match(self, self.w_OverflowError):
raise
if not w_exception:
@@ -1396,20 +1404,17 @@
try:
return bigint.tolonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
def r_ulonglong_w(self, w_obj, allow_conversion=True):
bigint = self.bigint_w(w_obj, allow_conversion)
try:
return bigint.toulonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
except ValueError:
- raise OperationError(self.w_ValueError,
- self.wrap('cannot convert negative integer '
- 'to unsigned int'))
+ raise oefmt(self.w_ValueError,
+ "cannot convert negative integer to unsigned int")
BUF_SIMPLE = 0x0000
BUF_WRITABLE = 0x0001
@@ -1526,7 +1531,7 @@
# the unicode buffer.)
try:
return self.str_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_TypeError):
raise
try:
@@ -1555,8 +1560,8 @@
from rpython.rlib import rstring
result = w_obj.str_w(self)
if '\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a string without NUL characters")
return rstring.assert_str0(result)
def int_w(self, w_obj, allow_conversion=True):
@@ -1596,8 +1601,7 @@
def realstr_w(self, w_obj):
# Like str_w, but only works if w_obj is really of type 'str'.
if not self.isinstance_w(w_obj, self.w_str):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a string'))
+ raise oefmt(self.w_TypeError, "argument must be a string")
return self.str_w(w_obj)
def unicode_w(self, w_obj):
@@ -1608,16 +1612,16 @@
from rpython.rlib import rstring
result = w_obj.unicode_w(self)
if u'\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a unicode string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a unicode string without NUL "
+ "characters")
return rstring.assert_str0(result)
def realunicode_w(self, w_obj):
# Like unicode_w, but only works if w_obj is really of type
# 'unicode'.
if not self.isinstance_w(w_obj, self.w_unicode):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a unicode'))
+ raise oefmt(self.w_TypeError, "argument must be a unicode")
return self.unicode_w(w_obj)
def bool_w(self, w_obj):
@@ -1636,8 +1640,8 @@
def gateway_r_uint_w(self, w_obj):
if self.isinstance_w(w_obj, self.w_float):
- raise OperationError(self.w_TypeError,
- self.wrap("integer argument expected, got float"))
+ raise oefmt(self.w_TypeError,
+ "integer argument expected, got float")
return self.uint_w(self.int(w_obj))
def gateway_nonnegint_w(self, w_obj):
@@ -1645,8 +1649,7 @@
# the integer is negative. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
return value
def c_int_w(self, w_obj):
@@ -1654,8 +1657,7 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < INT_MIN or value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_uint_w(self, w_obj):
@@ -1663,8 +1665,8 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.uint_w(w_obj)
if value > UINT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected an unsigned 32-bit integer"))
+ raise oefmt(self.w_OverflowError,
+ "expected an unsigned 32-bit integer")
return value
def c_nonnegint_w(self, w_obj):
@@ -1673,11 +1675,9 @@
# for gateway.py.
value = self.int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
if value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_short_w(self, w_obj):
@@ -1705,7 +1705,7 @@
# instead of raising OverflowError. For obscure cases only.
try:
return self.int_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import intmask
@@ -1716,7 +1716,7 @@
# instead of raising OverflowError.
try:
return self.r_longlong_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import longlongmask
@@ -1731,22 +1731,20 @@
not self.isinstance_w(w_fd, self.w_long)):
try:
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_AttributeError):
- raise OperationError(self.w_TypeError,
- self.wrap("argument must be an int, or have a fileno() "
- "method.")
- )
+ raise oefmt(self.w_TypeError,
+ "argument must be an int, or have a fileno() "
+ "method.")
raise
w_fd = self.call_function(w_fileno)
if (not self.isinstance_w(w_fd, self.w_int) and
not self.isinstance_w(w_fd, self.w_long)):
- raise OperationError(self.w_TypeError,
- self.wrap("fileno() returned a non-integer")
- )
+ raise oefmt(self.w_TypeError,
+ "fileno() returned a non-integer")
try:
fd = self.c_int_w(w_fd)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_OverflowError):
fd = -1
else:
@@ -1858,7 +1856,6 @@
('get', 'get', 3, ['__get__']),
('set', 'set', 3, ['__set__']),
('delete', 'delete', 2, ['__delete__']),
- ('userdel', 'del', 1, ['__del__']),
]
ObjSpace.BuiltinModuleTable = [
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -214,9 +214,8 @@
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("instance exception may not "
- "have a separate value"))
+ raise oefmt(space.w_TypeError,
+ "instance exception may not have a separate value")
w_value = w_inst
w_type = w_instclass
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -2,7 +2,7 @@
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import specialize
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
TICK_COUNTER_STEP = 100
@@ -141,6 +141,12 @@
actionflag.action_dispatcher(self, frame) # slow path
bytecode_trace._always_inline_ = True
+ def _run_finalizers_now(self):
+ # Tests only: run the actions now, to ensure that the
+ # finalizable objects are really finalized. Used notably by
+ # pypy.tool.pytest.apptest.
+ self.space.actionflag.action_dispatcher(self, None)
+
def bytecode_only_trace(self, frame):
"""
Like bytecode_trace() but doesn't invoke any other events besides the
@@ -515,75 +521,98 @@
"""
-class UserDelCallback(object):
- def __init__(self, w_obj, callback, descrname):
- self.w_obj = w_obj
- self.callback = callback
- self.descrname = descrname
- self.next = None
-
class UserDelAction(AsyncAction):
"""An action that invokes all pending app-level __del__() method.
This is done as an action instead of immediately when the
- interp-level __del__() is invoked, because the latter can occur more
+ WRootFinalizerQueue is triggered, because the latter can occur more
or less anywhere in the middle of code that might not be happy with
random app-level code mutating data structures under its feet.
"""
def __init__(self, space):
AsyncAction.__init__(self, space)
- self.dying_objects = None
- self.dying_objects_last = None
- self.finalizers_lock_count = 0
- self.enabled_at_app_level = True
-
- def register_callback(self, w_obj, callback, descrname):
- cb = UserDelCallback(w_obj, callback, descrname)
- if self.dying_objects_last is None:
- self.dying_objects = cb
- else:
- self.dying_objects_last.next = cb
- self.dying_objects_last = cb
- self.fire()
+ self.finalizers_lock_count = 0 # see pypy/module/gc
+ self.enabled_at_app_level = True # see pypy/module/gc
+ self.pending_with_disabled_del = None
def perform(self, executioncontext, frame):
- if self.finalizers_lock_count > 0:
- return
self._run_finalizers()
+ @jit.dont_look_inside
def _run_finalizers(self):
- # Each call to perform() first grabs the self.dying_objects
- # and replaces it with an empty list. We do this to try to
- # avoid too deep recursions of the kind of __del__ being called
- # while in the middle of another __del__ call.
- pending = self.dying_objects
- self.dying_objects = None
- self.dying_objects_last = None
+ while True:
+ w_obj = self.space.finalizer_queue.next_dead()
+ if w_obj is None:
+ break
+ self._call_finalizer(w_obj)
+
+ def gc_disabled(self, w_obj):
+ # If we're running in 'gc.disable()' mode, record w_obj in the
+ # "call me later" list and return True. In normal mode, return
+ # False. Use this function from some _finalize_() methods:
+ # if a _finalize_() method would call some user-defined
+ # app-level function, like a weakref callback, then first do
+ # 'if gc.disabled(self): return'. Another attempt at
+ # calling _finalize_() will be made after 'gc.enable()'.
+ # (The exact rule for when to use gc_disabled() or not is a bit
+ # vague, but most importantly this includes all user-level
+ # __del__().)
+ pdd = self.pending_with_disabled_del
+ if pdd is None:
+ return False
+ else:
+ pdd.append(w_obj)
+ return True
+
+ def _call_finalizer(self, w_obj):
+ # Before calling the finalizers, clear the weakrefs, if any.
+ w_obj.clear_all_weakrefs()
+
+ # Look up and call the app-level __del__, if any.
space = self.space
- while pending is not None:
+ if w_obj.typedef is None:
+ w_del = None # obscure case: for WeakrefLifeline
+ else:
+ w_del = space.lookup(w_obj, '__del__')
+ if w_del is not None:
+ if self.gc_disabled(w_obj):
+ return
try:
- pending.callback(pending.w_obj)
- except OperationError, e:
- e.write_unraisable(space, pending.descrname, pending.w_obj)
- e.clear(space) # break up reference cycles
- pending = pending.next
- #
- # Note: 'dying_objects' used to be just a regular list instead
- # of a chained list. This was the cause of "leaks" if we have a
- # program that constantly creates new objects with finalizers.
- # Here is why: say 'dying_objects' is a long list, and there
- # are n instances in it. Then we spend some time in this
- # function, possibly triggering more GCs, but keeping the list
- # of length n alive. Then the list is suddenly freed at the
- # end, and we return to the user program. At this point the
- # GC limit is still very high, because just before, there was
- # a list of length n alive. Assume that the program continues
- # to allocate a lot of instances with finalizers. The high GC
- # limit means that it could allocate a lot of instances before
- # reaching it --- possibly more than n. So the whole procedure
- # repeats with higher and higher values of n.
- #
- # This does not occur in the current implementation because
- # there is no list of length n: if n is large, then the GC
- # will run several times while walking the list, but it will
- # see lower and lower memory usage, with no lower bound of n.
+ space.get_and_call_function(w_del, w_obj)
+ except Exception as e:
+ report_error(space, e, "method __del__ of ", w_obj)
+
+ # Call the RPython-level _finalize_() method.
+ try:
+ w_obj._finalize_()
+ except Exception as e:
+ report_error(space, e, "finalizer of ", w_obj)
+
+
+def report_error(space, e, where, w_obj):
+ if isinstance(e, OperationError):
+ e.write_unraisable(space, where, w_obj)
+ e.clear(space) # break up reference cycles
+ else:
+ addrstring = w_obj.getaddrstring(space)
+ msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % (
+ str(e), where, space.type(w_obj).name, addrstring))
+ space.call_method(space.sys.get('stderr'), 'write',
+ space.wrap(msg))
+
+
+def make_finalizer_queue(W_Root, space):
+ """Make a FinalizerQueue subclass which responds to GC finalizer
+ events by 'firing' the UserDelAction class above. It does not
+    directly fetch the objects to finalize at all; they stay in the
+ GC-managed queue, and will only be fetched by UserDelAction
+ (between bytecodes)."""
+
+ class WRootFinalizerQueue(rgc.FinalizerQueue):
+ Class = W_Root
+
+ def finalizer_trigger(self):
+ space.user_del_action.fire()
+
+ space.user_del_action = UserDelAction(space)
+ space.finalizer_queue = WRootFinalizerQueue()
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -202,16 +202,15 @@
def setdict(self, space, w_dict):
if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting function's dictionary to a non-dict")
- )
+ raise oefmt(space.w_TypeError,
+ "setting function's dictionary to a non-dict")
self.w_func_dict = w_dict
def descr_function__new__(space, w_subtype, w_code, w_globals,
w_name=None, w_argdefs=None, w_closure=None):
code = space.interp_w(Code, w_code)
if not space.isinstance_w(w_globals, space.w_dict):
- raise OperationError(space.w_TypeError, space.wrap("expected dict"))
+ raise oefmt(space.w_TypeError, "expected dict")
if not space.is_none(w_name):
name = space.str_w(w_name)
else:
@@ -227,15 +226,15 @@
if space.is_none(w_closure) and nfreevars == 0:
closure = None
elif not space.is_w(space.type(w_closure), space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("invalid closure"))
+ raise oefmt(space.w_TypeError, "invalid closure")
else:
from pypy.interpreter.nestedscope import Cell
closure_w = space.unpackiterable(w_closure)
n = len(closure_w)
if nfreevars == 0:
- raise OperationError(space.w_ValueError, space.wrap("no closure needed"))
+ raise oefmt(space.w_ValueError, "no closure needed")
elif nfreevars != n:
- raise OperationError(space.w_ValueError, space.wrap("closure is wrong size"))
+ raise oefmt(space.w_ValueError, "closure is wrong size")
closure = [space.interp_w(Cell, w_cell) for w_cell in closure_w]
func = space.allocate_instance(Function, w_subtype)
Function.__init__(func, space, code, w_globals, defs_w, closure, name)
@@ -321,8 +320,8 @@
w_func_dict, w_module) = args_w
except ValueError:
# wrong args
- raise OperationError(space.w_ValueError,
- space.wrap("Wrong arguments to function.__setstate__"))
+ raise oefmt(space.w_ValueError,
+ "Wrong arguments to function.__setstate__")
self.space = space
self.name = space.str_w(w_name)
@@ -359,7 +358,8 @@
self.defs_w = []
return
if not space.isinstance_w(w_defaults, space.w_tuple):
- raise OperationError(space.w_TypeError, space.wrap("func_defaults must be set to a tuple object or None"))
+ raise oefmt(space.w_TypeError,
+ "func_defaults must be set to a tuple object or None")
self.defs_w = space.fixedview(w_defaults)
def fdel_func_defaults(self, space):
@@ -380,8 +380,8 @@
if space.isinstance_w(w_name, space.w_str):
self.name = space.str_w(w_name)
else:
- raise OperationError(space.w_TypeError,
- space.wrap("__name__ must be set to a string object"))
+ raise oefmt(space.w_TypeError,
+ "__name__ must be set to a string object")
def fdel_func_doc(self, space):
self.w_doc = space.w_None
@@ -406,8 +406,8 @@
def fset_func_code(self, space, w_code):
from pypy.interpreter.pycode import PyCode
if not self.can_change_code:
- raise OperationError(space.w_TypeError,
- space.wrap("Cannot change code attribute of builtin functions"))
+ raise oefmt(space.w_TypeError,
+ "Cannot change code attribute of builtin functions")
code = space.interp_w(Code, w_code)
closure_len = 0
if self.closure:
@@ -457,8 +457,7 @@
if space.is_w(w_instance, space.w_None):
w_instance = None
if w_instance is None and space.is_none(w_class):
- raise OperationError(space.w_TypeError,
- space.wrap("unbound methods must have class"))
+ raise oefmt(space.w_TypeError, "unbound methods must have class")
method = space.allocate_instance(Method, w_subtype)
Method.__init__(method, space, w_function, w_instance, w_class)
return space.wrap(method)
@@ -540,7 +539,7 @@
try:
return space.call_method(space.w_object, '__getattribute__',
space.wrap(self), w_attr)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# fall-back to the attribute of the underlying 'im_func'
@@ -659,8 +658,8 @@
self.w_module = func.w_module
def descr_builtinfunction__new__(space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("cannot create 'builtin_function' instances"))
+ raise oefmt(space.w_TypeError,
+ "cannot create 'builtin_function' instances")
def descr_function_repr(self):
return self.space.wrap('' % (self.name,))
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -21,7 +21,7 @@
from pypy.interpreter.signature import Signature
from pypy.interpreter.baseobjspace import (W_Root, ObjSpace, SpaceCache,
DescrMismatch)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import ClassMethod, FunctionWithFixedCode
from rpython.rlib import rstackovf
from rpython.rlib.objectmodel import we_are_translated
@@ -686,7 +686,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -699,14 +699,13 @@
raise
raise e
except KeyboardInterrupt:
- raise OperationError(space.w_KeyboardInterrupt,
- space.w_None)
+ raise OperationError(space.w_KeyboardInterrupt, space.w_None)
except MemoryError:
raise OperationError(space.w_MemoryError, space.w_None)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
rstackovf.check_stack_overflow()
- raise OperationError(space.w_RuntimeError,
- space.wrap("maximum recursion depth exceeded"))
+ raise oefmt(space.w_RuntimeError,
+ "maximum recursion depth exceeded")
except RuntimeError: # not on top of py.py
raise OperationError(space.w_RuntimeError, space.w_None)
@@ -725,7 +724,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args)
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -746,7 +745,7 @@
self.descrmismatch_op,
self.descr_reqcls,
args.prepend(w_obj))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -762,9 +761,8 @@
try:
w_result = self.fastfunc_0(space)
except DescrMismatch:
- raise OperationError(space.w_SystemError,
- space.wrap("unexpected DescrMismatch error"))
- except Exception, e:
+ raise oefmt(space.w_SystemError, "unexpected DescrMismatch error")
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -784,7 +782,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -804,7 +802,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -824,7 +822,7 @@
self.descrmismatch_op,
self.descr_reqcls,
Arguments(space, [w1, w2, w3]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
@@ -845,7 +843,7 @@
self.descr_reqcls,
Arguments(space,
[w1, w2, w3, w4]))
- except Exception, e:
+ except Exception as e:
self.handle_exception(space, e)
w_result = None
if w_result is None:
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,6 +1,7 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.pyopcode import LoopBlock
+from pypy.interpreter.pycode import CO_YIELD_INSIDE_TRY
from rpython.rlib import jit
@@ -13,6 +14,8 @@
self.frame = frame # turned into None when frame_finished_execution
self.pycode = frame.pycode
self.running = False
+ if self.pycode.co_flags & CO_YIELD_INSIDE_TRY:
+ self.register_finalizer(self.space)
def descr__repr__(self, space):
if self.pycode is None:
@@ -76,8 +79,7 @@
def _send_ex(self, w_arg, operr):
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None:
# xxx a bit ad-hoc, but we don't want to go inside
@@ -89,8 +91,9 @@
last_instr = jit.promote(frame.last_instr)
if last_instr == -1:
if w_arg and not space.is_w(w_arg, space.w_None):
- msg = "can't send non-None value to a just-started generator"
- raise OperationError(space.w_TypeError, space.wrap(msg))
+ raise oefmt(space.w_TypeError,
+ "can't send non-None value to a just-started "
+ "generator")
else:
if not w_arg:
w_arg = space.w_None
@@ -139,20 +142,19 @@
def descr_close(self):
"""x.close(arg) -> raise GeneratorExit inside generator."""
- assert isinstance(self, GeneratorIterator)
space = self.space
try:
w_retval = self.throw(space.w_GeneratorExit, space.w_None,
space.w_None)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_StopIteration) or \
e.match(space, space.w_GeneratorExit):
return space.w_None
raise
if w_retval is not None:
- msg = "generator ignored GeneratorExit"
- raise OperationError(space.w_RuntimeError, space.wrap(msg))
+ raise oefmt(space.w_RuntimeError,
+ "generator ignored GeneratorExit")
def descr_gi_frame(self, space):
if self.frame is not None and not self.frame.frame_finished_execution:
@@ -184,8 +186,7 @@
# XXX copied and simplified version of send_ex()
space = self.space
if self.running:
- raise OperationError(space.w_ValueError,
- space.wrap('generator already executing'))
+ raise oefmt(space.w_ValueError, "generator already executing")
frame = self.frame
if frame is None: # already finished
return
@@ -197,7 +198,7 @@
results=results, pycode=pycode)
try:
w_result = frame.execute_frame(space.w_None)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
@@ -213,25 +214,21 @@
unpack_into = _create_unpack_into()
unpack_into_w = _create_unpack_into()
-
-class GeneratorIteratorWithDel(GeneratorIterator):
-
- def __del__(self):
- # Only bother enqueuing self to raise an exception if the frame is
- # still not finished and finally or except blocks are present.
- self.clear_all_weakrefs()
+ def _finalize_(self):
+ # This is only called if the CO_YIELD_INSIDE_TRY flag is set
+ # on the code object. If the frame is still not finished and
+ # finally or except blocks are present at the current
+ # position, then raise a GeneratorExit. Otherwise, there is
+ # no point.
if self.frame is not None:
block = self.frame.lastblock
while block is not None:
if not isinstance(block, LoopBlock):
- self.enqueue_for_destruction(self.space,
- GeneratorIterator.descr_close,
- "interrupting generator of ")
+ self.descr_close()
break
block = block.previous
-
def get_printable_location_genentry(bytecode):
return '%s ' % (bytecode.get_repr(),)
generatorentry_driver = jit.JitDriver(greens=['pycode'],
diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py
--- a/pypy/interpreter/main.py
+++ b/pypy/interpreter/main.py
@@ -8,7 +8,7 @@
w_modules = space.sys.get('modules')
try:
return space.getitem(w_modules, w_main)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
mainmodule = module.Module(space, w_main)
@@ -52,7 +52,7 @@
else:
return
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.record_interpreter_traceback()
raise
@@ -110,7 +110,7 @@
try:
w_stdout = space.sys.get('stdout')
w_softspace = space.getattr(w_stdout, space.wrap('softspace'))
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# Don't crash if user defined stdout doesn't have softspace
@@ -118,7 +118,7 @@
if space.is_true(w_softspace):
space.call_method(w_stdout, 'write', space.wrap('\n'))
- except OperationError, operationerr:
+ except OperationError as operationerr:
operationerr.normalize_exception(space)
w_type = operationerr.w_type
w_value = operationerr.get_w_value(space)
@@ -162,7 +162,7 @@
space.call_function(w_hook, w_type, w_value, w_traceback)
return False # done
- except OperationError, err2:
+ except OperationError as err2:
# XXX should we go through sys.get('stderr') ?
print >> sys.stderr, 'Error calling sys.excepthook:'
err2.print_application_traceback(space)
diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py
--- a/pypy/interpreter/mixedmodule.py
+++ b/pypy/interpreter/mixedmodule.py
@@ -169,7 +169,7 @@
while 1:
try:
value = eval(spec, d)
- except NameError, ex:
+ except NameError as ex:
name = ex.args[0].split("'")[1] # super-Evil
if name in d:
raise # propagate the NameError
diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py
--- a/pypy/interpreter/nestedscope.py
+++ b/pypy/interpreter/nestedscope.py
@@ -1,7 +1,7 @@
from rpython.tool.uid import uid
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import oefmt
from pypy.interpreter.mixedmodule import MixedModule
@@ -78,4 +78,4 @@
try:
return self.get()
except ValueError:
- raise OperationError(space.w_ValueError, space.wrap("Cell is empty"))
+ raise oefmt(space.w_ValueError, "Cell is empty")
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -8,7 +8,7 @@
from pypy.interpreter import eval
from pypy.interpreter.signature import Signature
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.astcompiler.consts import (
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED,
@@ -110,7 +110,7 @@
if code_hook is not None:
try:
self.space.call_function(code_hook, self)
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self.space, "new_code_hook()")
def _initialize(self):
@@ -374,14 +374,13 @@
lnotab, w_freevars=None, w_cellvars=None,
magic=default_magic):
if argcount < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: argcount must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: argcount must not be negative")
if nlocals < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("code: nlocals must not be negative"))
+ raise oefmt(space.w_ValueError,
+ "code: nlocals must not be negative")
if not space.isinstance_w(w_constants, space.w_tuple):
- raise OperationError(space.w_TypeError,
- space.wrap("Expected tuple for constants"))
+ raise oefmt(space.w_TypeError, "Expected tuple for constants")
consts_w = space.fixedview(w_constants)
names = unpack_str_tuple(space, w_names)
varnames = unpack_str_tuple(space, w_varnames)
diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py
--- a/pypy/interpreter/pycompiler.py
+++ b/pypy/interpreter/pycompiler.py
@@ -7,7 +7,7 @@
from pypy.interpreter.pyparser import future, pyparse, error as parseerror
from pypy.interpreter.astcompiler import (astbuilder, codegen, consts, misc,
optimize, ast)
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
class AbstractCompiler(object):
@@ -55,21 +55,21 @@
try:
code = self.compile(source, filename, mode, flags)
return code # success
- except OperationError, err:
+ except OperationError as err:
if not err.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n", filename, mode, flags)
return None # expect more
- except OperationError, err1:
+ except OperationError as err1:
if not err1.match(space, space.w_SyntaxError):
raise
try:
self.compile(source + "\n\n", filename, mode, flags)
raise # uh? no error with \n\n. re-raise the previous error
- except OperationError, err2:
+ except OperationError as err2:
if not err2.match(space, space.w_SyntaxError):
raise
@@ -116,8 +116,7 @@
else:
check = True
if not check:
- raise OperationError(self.space.w_TypeError, self.space.wrap(
- "invalid node type"))
+ raise oefmt(self.space.w_TypeError, "invalid node type")
fut = misc.parse_future(node, self.future_flags.compiler_features)
f_flags, f_lineno, f_col = fut
@@ -131,9 +130,8 @@
try:
mod = optimize.optimize_ast(space, node, info)
code = codegen.compile_ast(space, mod, info)
- except parseerror.SyntaxError, e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ except parseerror.SyntaxError as e:
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return code
def compile_to_ast(self, source, filename, mode, flags):
@@ -145,12 +143,10 @@
try:
parse_tree = self.parser.parse_source(source, info)
mod = astbuilder.ast_from_node(space, parse_tree, info)
- except parseerror.IndentationError, e:
- raise OperationError(space.w_IndentationError,
- e.wrap_info(space))
- except parseerror.SyntaxError, e:
- raise OperationError(space.w_SyntaxError,
- e.wrap_info(space))
+ except parseerror.IndentationError as e:
+ raise OperationError(space.w_IndentationError, e.wrap_info(space))
+ except parseerror.SyntaxError as e:
+ raise OperationError(space.w_SyntaxError, e.wrap_info(space))
return mod
def compile(self, source, filename, mode, flags, hidden_applevel=False):
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -220,9 +220,9 @@
return # no cells needed - fast path
elif outer_func is None:
space = self.space
- raise OperationError(space.w_TypeError,
- space.wrap("directly executed code object "
- "may not contain free variables"))
+ raise oefmt(space.w_TypeError,
+ "directly executed code object may not contain free "
+ "variables")
if outer_func and outer_func.closure:
closure_size = len(outer_func.closure)
else:
@@ -241,12 +241,8 @@
def run(self):
"""Start this frame's execution."""
if self.getcode().co_flags & pycode.CO_GENERATOR:
- if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY:
- from pypy.interpreter.generator import GeneratorIteratorWithDel
- return self.space.wrap(GeneratorIteratorWithDel(self))
- else:
- from pypy.interpreter.generator import GeneratorIterator
- return self.space.wrap(GeneratorIterator(self))
+ from pypy.interpreter.generator import GeneratorIterator
+ return self.space.wrap(GeneratorIterator(self))
else:
return self.execute_frame()
@@ -513,7 +509,7 @@
self.locals_cells_stack_w = values_w[:]
valuestackdepth = space.int_w(w_stackdepth)
if not self._check_stack_index(valuestackdepth):
- raise OperationError(space.w_ValueError, space.wrap("invalid stackdepth"))
+ raise oefmt(space.w_ValueError, "invalid stackdepth")
assert valuestackdepth >= 0
self.valuestackdepth = valuestackdepth
if space.is_w(w_exc_value, space.w_None):
@@ -550,7 +546,7 @@
where the order is according to self.pycode.signature()."""
scope_len = len(scope_w)
if scope_len > self.pycode.co_nlocals:
- raise ValueError, "new fastscope is longer than the allocated area"
+ raise ValueError("new fastscope is longer than the allocated area")
# don't assign directly to 'locals_cells_stack_w[:scope_len]' to be
# virtualizable-friendly
for i in range(scope_len):
@@ -686,12 +682,11 @@
try:
new_lineno = space.int_w(w_new_lineno)
except OperationError:
- raise OperationError(space.w_ValueError,
- space.wrap("lineno must be an integer"))
+ raise oefmt(space.w_ValueError, "lineno must be an integer")
if self.get_w_f_trace() is None:
- raise OperationError(space.w_ValueError,
- space.wrap("f_lineno can only be set by a trace function."))
+ raise oefmt(space.w_ValueError,
+ "f_lineno can only be set by a trace function.")
line = self.pycode.co_firstlineno
if new_lineno < line:
@@ -718,8 +713,8 @@
# Don't jump to a line with an except in it.
code = self.pycode.co_code
if ord(code[new_lasti]) in (DUP_TOP, POP_TOP):
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump to 'except' line as there's no exception"))
+ raise oefmt(space.w_ValueError,
+ "can't jump to 'except' line as there's no exception")
# Don't jump into or out of a finally block.
f_lasti_setup_addr = -1
@@ -800,8 +795,8 @@
new_iblock = f_iblock - delta_iblock
if new_iblock > min_iblock:
- raise OperationError(space.w_ValueError,
- space.wrap("can't jump into the middle of a block"))
+ raise oefmt(space.w_ValueError,
+ "can't jump into the middle of a block")
while f_iblock > new_iblock:
block = self.pop_block()
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -67,9 +67,9 @@
def handle_bytecode(self, co_code, next_instr, ec):
try:
next_instr = self.dispatch_bytecode(co_code, next_instr, ec)
- except OperationError, operr:
+ except OperationError as operr:
next_instr = self.handle_operation_error(ec, operr)
- except RaiseWithExplicitTraceback, e:
+ except RaiseWithExplicitTraceback as e:
next_instr = self.handle_operation_error(ec, e.operr,
attach_tb=False)
except KeyboardInterrupt:
@@ -78,7 +78,7 @@
except MemoryError:
next_instr = self.handle_asynchronous_error(ec,
self.space.w_MemoryError)
- except rstackovf.StackOverflow, e:
+ except rstackovf.StackOverflow as e:
# Note that this case catches AttributeError!
rstackovf.check_stack_overflow()
From pypy.commits at gmail.com Wed May 11 03:15:57 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 00:15:57 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-macros-cast2: first try at generating
casting macros, but must remove second macro from pypy_macros.h
Message-ID: <5732dc2d.a553c20a.b5219.2903@mx.google.com>
Author: Matti Picus
Branch: cpyext-macros-cast2
Changeset: r84359:463b5eaaf35d
Date: 2016-05-11 01:04 +0300
http://bitbucket.org/pypy/pypy/changeset/463b5eaaf35d/
Log: first try at generating casting macros, but must remove second macro
from pypy_macros.h
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -301,7 +301,7 @@
DEFAULT_HEADER = 'pypy_decl.h'
def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER,
- gil=None, result_borrowed=False, result_is_ll=False):
+ gil=None, result_borrowed=False, result_is_ll=False, cast=False):
"""
Declares a function to be exported.
- `argtypes`, `restype` are lltypes and describe the function signature.
@@ -313,6 +313,8 @@
a C function pointer, but not exported by the API headers.
- set `gil` to "acquire", "release" or "around" to acquire the GIL,
release the GIL, or both
+ - 'cast' if True will create an UPPER CASE macro definition that casts
+ the first argument to the proper PyObject* type
"""
if isinstance(restype, lltype.Typedef):
real_restype = restype.OF
@@ -433,6 +435,8 @@
if header == DEFAULT_HEADER:
FUNCTIONS[func_name] = api_function
FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function
+ if cast:
+ CASTS.setdefault(header, {})[func_name] = api_function
INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests
return unwrapper_raise # used in 'normal' RPython code.
return decorate
@@ -451,6 +455,7 @@
INTERPLEVEL_API = {}
FUNCTIONS = {}
+CASTS = {}
FUNCTIONS_BY_HEADER = {}
# These are C symbols which cpyext will export, but which are defined in .c
@@ -995,7 +1000,6 @@
arg = db.gettype(argtype)
arg = arg.replace('@', 'arg%d' % (i,)).strip()
args.append(arg)
- args = ', '.join(args) or "void"
return restype, args
#_____________________________________________________
@@ -1023,6 +1027,7 @@
# added only for the macro, not the decl
continue
restype, args = c_function_signature(db, func)
+ args = ', '.join(args) or "void"
members.append('%s (*%s)(%s);' % (restype, name, args))
structindex[name] = len(structindex)
structmembers = '\n'.join(members)
@@ -1254,6 +1259,25 @@
for decl in FORWARD_DECLS:
pypy_decls.append("%s;" % (decl,))
+ casts = []
+ for header_name, header_functions in CASTS.iteritems():
+ header = decls[header_name]
+ for name, func in sorted(header_functions.iteritems()):
+ # create define casts like
+ # #define PyInt_AS_LONG(a1) PyPyInt_AS_LONG((PyObject *)a1)
+ if not func:
+ continue
+ casts.append(name)
+ _name = mangle_name(prefix, name)
+ assert _name is not None, 'error converting %s' % name
+ restype, args = c_function_signature(db, func)
+ l_args = ', '.join(['a%d' % i for i in xrange(len(args))])
+ r_args = ', '.join(['(%s)a%d' % (a.split('arg')[0], i)
+ for i,a in enumerate(args)])
+ _name = mangle_name(prefix, name)
+ header.append("#define %s(%s) %s(%s)" % (name, l_args, _name, r_args))
+ print casts
+ xxxx
for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems():
if header_name not in decls:
header = decls[header_name] = []
@@ -1265,14 +1289,12 @@
for name, func in sorted(header_functions.iteritems()):
if not func:
continue
- if header == DEFAULT_HEADER:
- _name = name
- else:
- # this name is not included in pypy_macros.h
+ if name not in casts:
_name = mangle_name(prefix, name)
assert _name is not None, 'error converting %s' % name
header.append("#define %s %s" % (name, _name))
restype, args = c_function_signature(db, func)
+ args = ', '.join(args) or "void"
header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args))
if api_struct:
callargs = ', '.join('arg%d' % (i,)
@@ -1408,7 +1430,7 @@
def setup_library(space):
"NOT_RPYTHON"
use_micronumpy = setup_micronumpy(space)
- export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS)
+ export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) # dict -> list
from rpython.translator.c.database import LowLevelDatabase
db = LowLevelDatabase()
prefix = 'PyPy'
diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py
--- a/pypy/module/cpyext/intobject.py
+++ b/pypy/module/cpyext/intobject.py
@@ -104,7 +104,7 @@
num = space.bigint_w(w_int)
return num.ulonglongmask()
- at cpython_api([PyObject], lltype.Signed, error=CANNOT_FAIL)
+ at cpython_api([PyObject], lltype.Signed, error=CANNOT_FAIL, cast=True)
def PyInt_AS_LONG(space, w_int):
"""Return the value of the object w_int. No error checking is performed."""
return space.int_w(w_int)
From pypy.commits at gmail.com Wed May 11 03:15:53 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 00:15:53 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-macros-cast2: generate 'casting' macros
for some functions
Message-ID: <5732dc29.171d1c0a.df524.4f7e@mx.google.com>
Author: Matti Picus
Branch: cpyext-macros-cast2
Changeset: r84357:781c52f85655
Date: 2016-05-10 23:50 +0300
http://bitbucket.org/pypy/pypy/changeset/781c52f85655/
Log: generate 'casting' macros for some functions
From pypy.commits at gmail.com Wed May 11 03:16:00 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 00:16:00 -0700 (PDT)
Subject: [pypy-commit] pypy ufunc-outer: implement numpypy.ufunc.outer
Message-ID: <5732dc30.cbb81c0a.e1563.5f7d@mx.google.com>
Author: Matti Picus
Branch: ufunc-outer
Changeset: r84361:c929b6b04c28
Date: 2016-05-11 09:26 +0300
http://bitbucket.org/pypy/pypy/changeset/c929b6b04c28/
Log: implement numpypy.ufunc.outer
From pypy.commits at gmail.com Wed May 11 03:15:55 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 00:15:55 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-macros-cast2: copy tests from
cpyext-macros-cast
Message-ID: <5732dc2b.c486c20a.3cbcc.2b7b@mx.google.com>
Author: Matti Picus
Branch: cpyext-macros-cast2
Changeset: r84358:a450940bdbd5
Date: 2016-05-10 23:51 +0300
http://bitbucket.org/pypy/pypy/changeset/a450940bdbd5/
Log: copy tests from cpyext=macros-cast
diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
--- a/pypy/module/cpyext/test/test_bytesobject.py
+++ b/pypy/module/cpyext/test/test_bytesobject.py
@@ -288,6 +288,24 @@
# This does not test much, but at least the refcounts are checked.
assert module.test_intern_inplace('s') == 's'
+ def test_bytes_macros(self):
+ """The PyString_* macros cast, and calls expecting that build."""
+ module = self.import_extension('foo', [
+ ("test_macro_invocations", "METH_NOARGS",
+ """
+ PyObject* o = PyString_FromString("");
+ PyStringObject* u = (PyStringObject*)o;
+
+ PyString_GET_SIZE(u);
+ PyString_GET_SIZE(o);
+
+ PyString_AS_STRING(o);
+ PyString_AS_STRING(u);
+
+ return o;
+ """)])
+ assert module.test_macro_invocations() == ''
+
def test_hash_and_state(self):
module = self.import_extension('foo', [
("test_hash", "METH_VARARGS",
diff --git a/pypy/module/cpyext/test/test_datetime.py b/pypy/module/cpyext/test/test_datetime.py
--- a/pypy/module/cpyext/test/test_datetime.py
+++ b/pypy/module/cpyext/test/test_datetime.py
@@ -117,3 +117,106 @@
datetime.timedelta,
datetime.tzinfo)
module.clear_types()
+
+ def test_macros(self):
+ module = self.import_extension('foo', [
+ ("test_date_macros", "METH_NOARGS",
+ """
+ PyDateTime_IMPORT;
+ if (!PyDateTimeAPI) {
+ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI");
+ return NULL;
+ }
+ PyObject* obj = PyDate_FromDate(2000, 6, 6);
+ PyDateTime_Date* d = (PyDateTime_Date*)obj;
+
+ PyDateTime_GET_YEAR(obj);
+ PyDateTime_GET_YEAR(d);
+
+ PyDateTime_GET_MONTH(obj);
+ PyDateTime_GET_MONTH(d);
+
+ PyDateTime_GET_DAY(obj);
+ PyDateTime_GET_DAY(d);
+
+ return obj;
+ """),
+ ("test_datetime_macros", "METH_NOARGS",
+ """
+ PyDateTime_IMPORT;
+ if (!PyDateTimeAPI) {
+ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI");
+ return NULL;
+ }
+ PyObject* obj = PyDateTime_FromDateAndTime(2000, 6, 6, 6, 6, 6, 6);
+ PyDateTime_DateTime* dt = (PyDateTime_DateTime*)obj;
+
+ PyDateTime_GET_YEAR(obj);
+ PyDateTime_GET_YEAR(dt);
+
+ PyDateTime_GET_MONTH(obj);
+ PyDateTime_GET_MONTH(dt);
+
+ PyDateTime_GET_DAY(obj);
+ PyDateTime_GET_DAY(dt);
+
+ PyDateTime_DATE_GET_HOUR(obj);
+ PyDateTime_DATE_GET_HOUR(dt);
+
+ PyDateTime_DATE_GET_MINUTE(obj);
+ PyDateTime_DATE_GET_MINUTE(dt);
+
+ PyDateTime_DATE_GET_SECOND(obj);
+ PyDateTime_DATE_GET_SECOND(dt);
+
+ PyDateTime_DATE_GET_MICROSECOND(obj);
+ PyDateTime_DATE_GET_MICROSECOND(dt);
+
+ return obj;
+ """),
+ ("test_time_macros", "METH_NOARGS",
+ """
+ PyDateTime_IMPORT;
+ if (!PyDateTimeAPI) {
+ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI");
+ return NULL;
+ }
+ PyObject* obj = PyTime_FromTime(6, 6, 6, 6);
+ PyDateTime_Time* t = (PyDateTime_Time*)obj;
+
+ PyDateTime_TIME_GET_HOUR(obj);
+ PyDateTime_TIME_GET_HOUR(t);
+
+ PyDateTime_TIME_GET_MINUTE(obj);
+ PyDateTime_TIME_GET_MINUTE(t);
+
+ PyDateTime_TIME_GET_SECOND(obj);
+ PyDateTime_TIME_GET_SECOND(t);
+
+ PyDateTime_TIME_GET_MICROSECOND(obj);
+ PyDateTime_TIME_GET_MICROSECOND(t);
+
+ return obj;
+ """),
+ ("test_delta_macros", "METH_NOARGS",
+ """
+ PyDateTime_IMPORT;
+ if (!PyDateTimeAPI) {
+ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI");
+ return NULL;
+ }
+ PyObject* obj = PyDelta_FromDSU(6, 6, 6);
+ PyDateTime_Delta* delta = (PyDateTime_Delta*)obj;
+
+ PyDateTime_DELTA_GET_DAYS(obj);
+ PyDateTime_DELTA_GET_DAYS(delta);
+
+ PyDateTime_DELTA_GET_SECONDS(obj);
+ PyDateTime_DELTA_GET_SECONDS(delta);
+
+ PyDateTime_DELTA_GET_MICROSECONDS(obj);
+ PyDateTime_DELTA_GET_MICROSECONDS(delta);
+
+ return obj;
+ """),
+ ])
diff --git a/pypy/module/cpyext/test/test_floatobject.py b/pypy/module/cpyext/test/test_floatobject.py
--- a/pypy/module/cpyext/test/test_floatobject.py
+++ b/pypy/module/cpyext/test/test_floatobject.py
@@ -77,3 +77,19 @@
neginf = module.return_neginf()
assert neginf < 0
assert math.isinf(neginf)
+
+ def test_macro_accepts_wrong_pointer_type(self):
+ import math
+
+ module = self.import_extension('foo', [
+ ("test_macros", "METH_NOARGS",
+ """
+ PyObject* o = PyFloat_FromDouble(1.0);
+ // no PyFloatObject
+ char* dumb_pointer = (char*)o;
+
+ PyFloat_AS_DOUBLE(o);
+ PyFloat_AS_DOUBLE(dumb_pointer);
+
+ Py_RETURN_NONE;"""),
+ ])
diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py
--- a/pypy/module/cpyext/test/test_intobject.py
+++ b/pypy/module/cpyext/test/test_intobject.py
@@ -191,3 +191,17 @@
i = mod.test_int()
assert isinstance(i, int)
assert i == 42
+
+ def test_int_macros(self):
+ mod = self.import_extension('foo', [
+ ("test_macros", "METH_NOARGS",
+ """
+ PyObject * obj = PyInt_FromLong(42);
+ PyIntObject * i = (PyIntObject*)obj;
+ PyInt_AS_LONG(obj);
+ PyInt_AS_LONG(i);
+ Py_RETURN_NONE;
+ """
+ ),
+ ])
+
diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py
--- a/pypy/module/cpyext/test/test_listobject.py
+++ b/pypy/module/cpyext/test/test_listobject.py
@@ -137,6 +137,33 @@
module.setlistitem(l,0)
assert l == [None, 2, 3]
+ def test_list_macros(self):
+ """The PyList_* macros cast, and calls expecting that build."""
+ module = self.import_extension('foo', [
+ ("test_macro_invocations", "METH_NOARGS",
+ """
+ PyObject* o = PyList_New(2);
+ PyListObject* l = (PyListObject*)o;
+
+
+ Py_INCREF(o);
+ PyList_SET_ITEM(o, 0, o);
+ Py_INCREF(o);
+ PyList_SET_ITEM(l, 1, o);
+
+ PyList_GET_ITEM(o, 0);
+ PyList_GET_ITEM(l, 1);
+
+ PyList_GET_SIZE(o);
+ PyList_GET_SIZE(l);
+
+ return o;
+ """
+ )
+ ])
+ x = module.test_macro_invocations()
+ assert x[0] is x[1] is x
+
def test_get_item_macro(self):
module = self.import_extension('foo', [
("test_get_item", "METH_NOARGS",
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py
--- a/pypy/module/cpyext/test/test_sequence.py
+++ b/pypy/module/cpyext/test/test_sequence.py
@@ -155,6 +155,30 @@
result = api.PySequence_Index(w_gen, w_tofind)
assert result == 4
+class AppTestSetObject(AppTestCpythonExtensionBase):
+ def test_sequence_macro_cast(self):
+ module = self.import_extension('foo', [
+ ("test_macro_cast", "METH_NOARGS",
+ """
+ PyObject* o = PyList_New(0);
+ PyList_Append(o, o);
+ PyListObject* l = (PyListObject*)o;
+
+ PySequence_Fast_GET_ITEM(o, 0);
+ PySequence_Fast_GET_ITEM(l, 0);
+
+ PySequence_Fast_GET_SIZE(o);
+ PySequence_Fast_GET_SIZE(l);
+
+ PySequence_ITEM(o, 0);
+ PySequence_ITEM(l, 0);
+
+ return o;
+ """
+ )
+ ])
+
+
class TestCPyListStrategy(BaseApiTest):
def test_getitem_setitem(self, space, api):
w_l = space.wrap([1, 2, 3, 4])
diff --git a/pypy/module/cpyext/test/test_setobject.py b/pypy/module/cpyext/test/test_setobject.py
--- a/pypy/module/cpyext/test/test_setobject.py
+++ b/pypy/module/cpyext/test/test_setobject.py
@@ -2,6 +2,7 @@
from pypy.module.cpyext.pyobject import PyObject, PyObjectP, make_ref, from_ref
from pypy.module.cpyext.test.test_api import BaseApiTest
+from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from rpython.rtyper.lltypesystem import rffi, lltype
@@ -45,3 +46,20 @@
w_frozenset = space.newfrozenset([space.wrap(i) for i in [1, 2, 3, 4]])
assert api.PyAnySet_CheckExact(w_set)
assert api.PyAnySet_CheckExact(w_frozenset)
+
+class AppTestSetObject(AppTestCpythonExtensionBase):
+ def test_set_macro_cast(self):
+ module = self.import_extension('foo', [
+ ("test_macro_cast", "METH_NOARGS",
+ """
+ PyObject* o = PySet_New(NULL);
+ // no PySetObject
+ char* dumb_pointer = (char*) o;
+
+ PySet_GET_SIZE(o);
+ PySet_GET_SIZE(dumb_pointer);
+
+ return o;
+ """
+ )
+ ])
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -111,6 +111,26 @@
assert isinstance(res, str)
assert res == 'caf?'
+ def test_unicode_macros(self):
+ """The PyUnicode_* macros cast, and calls expecting that build."""
+ module = self.import_extension('foo', [
+ ("test_macro_invocations", "METH_NOARGS",
+ """
+ PyObject* o = PyUnicode_FromString("");
+ PyUnicodeObject* u = (PyUnicodeObject*)o;
+
+ PyUnicode_GET_SIZE(u);
+ PyUnicode_GET_SIZE(o);
+
+ PyUnicode_GET_DATA_SIZE(u);
+ PyUnicode_GET_DATA_SIZE(o);
+
+ PyUnicode_AS_UNICODE(o);
+ PyUnicode_AS_UNICODE(u);
+ return o;
+ """)])
+ assert module.test_macro_invocations() == u''
+
class TestUnicode(BaseApiTest):
def test_unicodeobject(self, space, api):
assert api.PyUnicode_GET_SIZE(space.wrap(u'späm')) == 4
diff --git a/pypy/module/cpyext/test/test_weakref.py b/pypy/module/cpyext/test/test_weakref.py
--- a/pypy/module/cpyext/test/test_weakref.py
+++ b/pypy/module/cpyext/test/test_weakref.py
@@ -34,3 +34,25 @@
del w_obj
import gc; gc.collect()
assert space.is_w(api.PyWeakref_LockObject(w_ref), space.w_None)
+
+
+class AppTestWeakReference(AppTestCpythonExtensionBase):
+
+ def test_weakref_macro(self):
+ module = self.import_extension('foo', [
+ ("test_macro_cast", "METH_NOARGS",
+ """
+ // PyExc_Warning is some weak-reffable PyObject*.
+ PyObject* weakref_obj = PyWeakref_NewRef(PyExc_Warning, NULL);
+ if (!weakref_obj) return weakref_obj;
+ // No public PyWeakReference type.
+ char* dumb_pointer = (char*) weakref_obj;
+
+ PyWeakref_GET_OBJECT(weakref_obj);
+ PyWeakref_GET_OBJECT(dumb_pointer);
+
+ return weakref_obj;
+ """
+ )
+ ])
+ module.test_macro_cast()
From pypy.commits at gmail.com Wed May 11 03:16:02 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 00:16:02 -0700 (PDT)
Subject: [pypy-commit] pypy ufunc-outer: test, fix ufunc.outer,
following numpy's c implementation
Message-ID: <5732dc32.42191c0a.ea803.5b43@mx.google.com>
Author: Matti Picus
Branch: ufunc-outer
Changeset: r84362:fe644c4006dd
Date: 2016-05-11 10:13 +0300
http://bitbucket.org/pypy/pypy/changeset/fe644c4006dd/
Log: test, fix ufunc.outer, following numpy's c implementation
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -443,7 +443,7 @@
'array does not have imaginary part to set')
self.implementation.set_imag(space, self, w_value)
- def reshape(self, space, w_shape, order):
+ def reshape(self, space, w_shape, order=NPY.ANYORDER):
new_shape = get_shape_from_iterable(space, self.get_size(), w_shape)
new_impl = self.implementation.reshape(self, new_shape, order)
if new_impl is not None:
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -1480,7 +1480,21 @@
def test_outer(self):
import numpy as np
- from numpy import absolute
+ c = np.multiply.outer([1, 2, 3], [4, 5, 6])
+ assert c.shape == (3, 3)
+ assert (c ==[[ 4, 5, 6],
+ [ 8, 10, 12],
+ [12, 15, 18]]).all()
+ A = np.array([[1, 2, 3], [4, 5, 6]])
+ B = np.array([[1, 2, 3, 4]])
+ c = np.multiply.outer(A, B)
+ assert c.shape == (2, 3, 1, 4)
+ assert (c == [[[[ 1, 2, 3, 4]],
+ [[ 2, 4, 6, 8]],
+ [[ 3, 6, 9, 12]]],
+ [[[ 4, 8, 12, 16]],
+ [[ 5, 10, 15, 20]],
+ [[ 6, 12, 18, 24]]]]).all()
exc = raises(ValueError, np.absolute.outer, [-1, -2])
assert exc.value[0] == 'outer product only supported for binary functions'
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -363,12 +363,18 @@
out = space.call_method(obj, '__array_wrap__', out, space.w_None)
return out
- def descr_outer(self, space, __args__):
- return self._outer(space, __args__)
-
- def _outer(self, space, __args__):
- raise oefmt(space.w_ValueError,
+ def descr_outer(self, space, args_w):
+ if self.nin != 2:
+ raise oefmt(space.w_ValueError,
"outer product only supported for binary functions")
+ if len(args_w) != 2:
+ raise oefmt(space.w_ValueError,
+ "exactly two arguments expected")
+ args = [convert_to_array(space, w_obj) for w_obj in args_w]
+ w_outshape = [space.wrap(i) for i in args[0].get_shape() + [1]*args[1].ndims()]
+ args0 = args[0].reshape(space, space.newtuple(w_outshape))
+ return self.descr_call(space, Arguments.frompacked(space,
+ space.newlist([args0, args[1]])))
def parse_kwargs(self, space, kwds_w):
w_casting = kwds_w.pop('casting', None)
From pypy.commits at gmail.com Wed May 11 03:15:59 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 00:15:59 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-macros-cast2: problems with api_struct
Message-ID: <5732dc2f.89141c0a.43652.5169@mx.google.com>
Author: Matti Picus
Branch: cpyext-macros-cast2
Changeset: r84360:ad44c12b677a
Date: 2016-05-11 09:25 +0300
http://bitbucket.org/pypy/pypy/changeset/ad44c12b677a/
Log: problems with api_struct
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -1027,8 +1027,8 @@
# added only for the macro, not the decl
continue
restype, args = c_function_signature(db, func)
- args = ', '.join(args) or "void"
- members.append('%s (*%s)(%s);' % (restype, name, args))
+ args_str = ', '.join(args) or "void"
+ members.append('%s (*%s)(%s);' % (restype, name, args_str))
structindex[name] = len(structindex)
structmembers = '\n'.join(members)
struct_declaration_code = """\
@@ -1268,16 +1268,14 @@
if not func:
continue
casts.append(name)
- _name = mangle_name(prefix, name)
- assert _name is not None, 'error converting %s' % name
- restype, args = c_function_signature(db, func)
- l_args = ', '.join(['a%d' % i for i in xrange(len(args))])
- r_args = ', '.join(['(%s)a%d' % (a.split('arg')[0], i)
+ if not api_struct:
+ _name = mangle_name(prefix, name)
+ assert _name is not None, 'error converting %s' % name
+ restype, args = c_function_signature(db, func)
+ l_args = ', '.join(['a%d' % i for i in xrange(len(args))])
+ r_args = ', '.join(['(%s)a%d' % (a.split('arg')[0], i)
for i,a in enumerate(args)])
- _name = mangle_name(prefix, name)
- header.append("#define %s(%s) %s(%s)" % (name, l_args, _name, r_args))
- print casts
- xxxx
+ header.append("#define %s(%s) %s(%s)" % (name, l_args, _name, r_args))
for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems():
if header_name not in decls:
header = decls[header_name] = []
@@ -1289,13 +1287,13 @@
for name, func in sorted(header_functions.iteritems()):
if not func:
continue
+ _name = mangle_name(prefix, name)
+ assert _name is not None, 'error converting %s' % name
if name not in casts:
- _name = mangle_name(prefix, name)
- assert _name is not None, 'error converting %s' % name
header.append("#define %s %s" % (name, _name))
restype, args = c_function_signature(db, func)
- args = ', '.join(args) or "void"
- header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args))
+ args_str = ', '.join(args) or "void"
+ header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args_str))
if api_struct:
callargs = ', '.join('arg%d' % (i,)
for i in range(len(func.argtypes)))
@@ -1303,7 +1301,7 @@
body = "{ _pypyAPI.%s(%s); }" % (_name, callargs)
else:
body = "{ return _pypyAPI.%s(%s); }" % (_name, callargs)
- functions.append('%s %s(%s)\n%s' % (restype, name, args, body))
+ functions.append('%s %s(%s)\n%s' % (restype, name, args_str, body))
for name in VA_TP_LIST:
name_no_star = process_va_name(name)
header = ('%s pypy_va_get_%s(va_list* vp)' %
From pypy.commits at gmail.com Wed May 11 03:31:40 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 00:31:40 -0700 (PDT)
Subject: [pypy-commit] pypy default: Forgot to fix _winreg for the new
_finalize_() style
Message-ID: <5732dfdc.82bb1c0a.33888.505c@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84363:ee153d8516f5
Date: 2016-05-11 09:15 +0200
http://bitbucket.org/pypy/pypy/changeset/ee153d8516f5/
Log: Forgot to fix _winreg for the new _finalize_() style
diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py
--- a/pypy/module/_winreg/interp_winreg.py
+++ b/pypy/module/_winreg/interp_winreg.py
@@ -14,10 +14,11 @@
space.wrap(message)]))
class W_HKEY(W_Root):
- def __init__(self, hkey):
+ def __init__(self, space, hkey):
self.hkey = hkey
+ self.register_finalizer(space)
- def descr_del(self, space):
+ def _finalize_(self, space):
self.Close(space)
def as_int(self):
@@ -64,7 +65,7 @@
@unwrap_spec(key=int)
def new_HKEY(space, w_subtype, key):
hkey = rffi.cast(rwinreg.HKEY, key)
- return space.wrap(W_HKEY(hkey))
+ return space.wrap(W_HKEY(space, hkey))
descr_HKEY_new = interp2app(new_HKEY)
W_HKEY.typedef = TypeDef(
@@ -91,7 +92,6 @@
__int__ - Converting a handle to an integer returns the Win32 handle.
__cmp__ - Handle objects are compared using the handle value.""",
__new__ = descr_HKEY_new,
- __del__ = interp2app(W_HKEY.descr_del),
__repr__ = interp2app(W_HKEY.descr_repr),
__int__ = interp2app(W_HKEY.descr_int),
__nonzero__ = interp2app(W_HKEY.descr_nonzero),
@@ -480,7 +480,7 @@
ret = rwinreg.RegCreateKey(hkey, subkey, rethkey)
if ret != 0:
raiseWindowsError(space, ret, 'CreateKey')
- return space.wrap(W_HKEY(rethkey[0]))
+ return space.wrap(W_HKEY(space, rethkey[0]))
@unwrap_spec(subkey=str, res=int, sam=rffi.r_uint)
def CreateKeyEx(space, w_hkey, subkey, res=0, sam=rwinreg.KEY_WRITE):
@@ -502,7 +502,7 @@
lltype.nullptr(rwin32.LPDWORD.TO))
if ret != 0:
raiseWindowsError(space, ret, 'CreateKeyEx')
- return space.wrap(W_HKEY(rethkey[0]))
+ return space.wrap(W_HKEY(space, rethkey[0]))
@unwrap_spec(subkey=str)
def DeleteKey(space, w_hkey, subkey):
@@ -549,7 +549,7 @@
ret = rwinreg.RegOpenKeyEx(hkey, subkey, res, sam, rethkey)
if ret != 0:
raiseWindowsError(space, ret, 'RegOpenKeyEx')
- return space.wrap(W_HKEY(rethkey[0]))
+ return space.wrap(W_HKEY(space, rethkey[0]))
@unwrap_spec(index=int)
def EnumValue(space, w_hkey, index):
@@ -688,7 +688,7 @@
ret = rwinreg.RegConnectRegistry(machine, hkey, rethkey)
if ret != 0:
raiseWindowsError(space, ret, 'RegConnectRegistry')
- return space.wrap(W_HKEY(rethkey[0]))
+ return space.wrap(W_HKEY(space, rethkey[0]))
@unwrap_spec(source=unicode)
def ExpandEnvironmentStrings(space, source):
From pypy.commits at gmail.com Wed May 11 03:33:41 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 00:33:41 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: Revert the whole change for
int_{add, sub, mul}_ovf. It can be argued
Message-ID: <5732e055.878d1c0a.287ef.5317@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84365:7d31bc576cbc
Date: 2016-05-11 09:33 +0200
http://bitbucket.org/pypy/pypy/changeset/7d31bc576cbc/
Log: Revert the whole change for int_{add,sub,mul}_ovf. It can be argued
that the C backend should handle them directly, and more
prosaically, it goes in the way of tests if they start seeing
'cast_int_to_float' and other unexpected operations
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -333,6 +333,17 @@
rewrite_op_float_gt = _rewrite_symmetric
rewrite_op_float_ge = _rewrite_symmetric
+ def rewrite_op_int_add_ovf(self, op):
+ op0 = self._rewrite_symmetric(op)
+ op1 = SpaceOperation('-live-', [], None)
+ return [op1, op0]
+
+ rewrite_op_int_mul_ovf = rewrite_op_int_add_ovf
+
+ def rewrite_op_int_sub_ovf(self, op):
+ op1 = SpaceOperation('-live-', [], None)
+ return [op1, op]
+
def _noop_rewrite(self, op):
return op
@@ -426,7 +437,7 @@
if oopspec_name.startswith('list.') or oopspec_name.startswith('newlist'):
prepare = self._handle_list_call
elif oopspec_name.startswith('int.'):
- prepare = self._handle_int_ovf
+ prepare = self._handle_int_special
elif oopspec_name.startswith('stroruni.'):
prepare = self._handle_stroruni_call
elif oopspec_name == 'str.str2unicode':
@@ -1479,6 +1490,7 @@
for _old, _new in [('bool_not', 'int_is_zero'),
('cast_bool_to_float', 'cast_int_to_float'),
+ ('int_add_nonneg_ovf', 'int_add_ovf'),
('keepalive', '-live-'),
('char_lt', 'int_lt'),
@@ -1902,22 +1914,15 @@
llmemory.cast_ptr_to_adr(c_func.value))
self.callcontrol.callinfocollection.add(oopspecindex, calldescr, func)
- def _handle_int_ovf(self, op, oopspec_name, args):
- opname = oopspec_name.replace('.', '_')
- if oopspec_name in ('int.add_ovf', 'int.sub_ovf', 'int.mul_ovf'):
- op0 = SpaceOperation(opname, args, op.result)
- if oopspec_name in ('int.add_ovf', 'int.mul_ovf'):
- op0 = self._rewrite_symmetric(op0)
- oplist = [SpaceOperation('-live-', [], None), op0]
- return oplist
- elif oopspec_name == 'int.neg_ovf':
+ def _handle_int_special(self, op, oopspec_name, args):
+ if oopspec_name == 'int.neg_ovf':
[v_x] = args
op0 = SpaceOperation('int_sub_ovf',
[Constant(0, lltype.Signed), v_x],
op.result)
- oplist = [SpaceOperation('-live-', [], None), op0]
- return oplist
+ return self.rewrite_operation(op0)
else:
+ opname = oopspec_name.replace('.', '_')
os = getattr(EffectInfo, 'OS_' + opname.upper())
return self._handle_oopspec_call(op, args, os,
EffectInfo.EF_ELIDABLE_CANNOT_RAISE)
diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py
--- a/rpython/jit/codewriter/test/test_flatten.py
+++ b/rpython/jit/codewriter/test/test_flatten.py
@@ -71,9 +71,6 @@
_descr_cannot_raise = FakeDescr()
callinfocollection = FakeCallInfoCollection()
def guess_call_kind(self, op):
- if op.args[0].value._obj._name.startswith(
- ('ll_int_add_ovf', 'll_int_sub_ovf', 'll_int_mul_ovf')):
- return 'builtin'
return 'residual'
def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE,
extraeffect=None, extradescr=None):
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -272,17 +272,17 @@
assert op1.result == v3
assert op1.opname == name2[0]
-@py.test.mark.parametrize('opname', ['add_ovf', 'mul_ovf'])
-def test_symmetric_op_ovf(opname):
+@py.test.mark.parametrize('opname', ['add_ovf', 'sub_ovf', 'mul_ovf'])
+def test_int_op_ovf(opname):
v3 = varoftype(lltype.Signed)
for v1 in [varoftype(lltype.Signed), const(42)]:
for v2 in [varoftype(lltype.Signed), const(43)]:
- op = SpaceOperation('direct_call', [Constant(opname), v1, v2], v3)
- oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.'+opname,
- [v1, v2])
+ op = SpaceOperation('int_' + opname, [v1, v2], v3)
+ oplist = Transformer(FakeCPU()).rewrite_operation(op)
op1, op0 = oplist
- assert op0.opname == 'int_'+opname
- if isinstance(v1, Constant) and isinstance(v2, Variable):
+ assert op0.opname == 'int_' + opname
+ if (isinstance(v1, Constant) and isinstance(v2, Variable)
+ and opname != 'sub_ovf'):
assert op0.args == [v2, v1]
assert op0.result == v3
else:
@@ -292,27 +292,12 @@
assert op1.args == []
assert op1.result is None
-@py.test.mark.parametrize('opname', ['sub_ovf'])
-def test_asymmetric_op_ovf(opname):
- v3 = varoftype(lltype.Signed)
- for v1 in [varoftype(lltype.Signed), const(42)]:
- for v2 in [varoftype(lltype.Signed), const(43)]:
- op = SpaceOperation('direct_call', [Constant(opname), v1, v2], v3)
- oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.'+opname,
- [v1, v2])
- op1, op0 = oplist
- assert op0.opname == 'int_'+opname
- assert op0.args == [v1, v2]
- assert op0.result == v3
- assert op1.opname == '-live-'
- assert op1.args == []
- assert op1.result is None
-
def test_neg_ovf():
v3 = varoftype(lltype.Signed)
for v1 in [varoftype(lltype.Signed), const(42)]:
op = SpaceOperation('direct_call', [Constant('neg_ovf'), v1], v3)
- oplist = Transformer(FakeCPU())._handle_int_ovf(op, 'int.neg_ovf', [v1])
+ oplist = Transformer(FakeCPU())._handle_int_special(op, 'int.neg_ovf',
+ [v1])
op1, op0 = oplist
assert op0.opname == 'int_sub_ovf'
assert op0.args == [Constant(0), v1]
@@ -322,13 +307,13 @@
assert op1.result is None
@py.test.mark.parametrize('opname', ['py_div', 'udiv', 'py_mod', 'umod'])
-def test_asymmetric_op_residual(opname):
+def test_int_op_residual(opname):
v3 = varoftype(lltype.Signed)
tr = Transformer(FakeCPU(), FakeBuiltinCallControl())
for v1 in [varoftype(lltype.Signed), const(42)]:
for v2 in [varoftype(lltype.Signed), const(43)]:
op = SpaceOperation('direct_call', [Constant(opname), v1, v2], v3)
- op0 = tr._handle_int_ovf(op, 'int.'+opname, [v1, v2])
+ op0 = tr._handle_int_special(op, 'int.'+opname, [v1, v2])
assert op0.opname == 'residual_call_ir_i'
assert op0.args[0].value == opname # pseudo-function as str
expected = ('int_' + opname).upper()
diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py
--- a/rpython/rtyper/llinterp.py
+++ b/rpython/rtyper/llinterp.py
@@ -1073,6 +1073,38 @@
def op_track_alloc_stop(self, addr):
checkadr(addr)
+ # ____________________________________________________________
+ # Overflow-detecting variants
+
+ def op_int_add_ovf(self, x, y):
+ assert isinstance(x, (int, long, llmemory.AddressOffset))
+ assert isinstance(y, (int, long, llmemory.AddressOffset))
+ try:
+ return ovfcheck(x + y)
+ except OverflowError:
+ self.make_llexception()
+
+ def op_int_add_nonneg_ovf(self, x, y):
+ if isinstance(y, int):
+ assert y >= 0
+ return self.op_int_add_ovf(x, y)
+
+ def op_int_sub_ovf(self, x, y):
+ assert isinstance(x, (int, long))
+ assert isinstance(y, (int, long))
+ try:
+ return ovfcheck(x - y)
+ except OverflowError:
+ self.make_llexception()
+
+ def op_int_mul_ovf(self, x, y):
+ assert isinstance(x, (int, long, llmemory.AddressOffset))
+ assert isinstance(y, (int, long, llmemory.AddressOffset))
+ try:
+ return ovfcheck(x * y)
+ except OverflowError:
+ self.make_llexception()
+
def op_int_is_true(self, x):
# special case
if type(x) is CDefinedIntSymbolic:
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -212,6 +212,12 @@
'int_between': LLOp(canfold=True), # a <= b < c
'int_force_ge_zero': LLOp(canfold=True), # 0 if a < 0 else a
+ 'int_add_ovf': LLOp(canraise=(OverflowError,), tryfold=True),
+ 'int_add_nonneg_ovf': LLOp(canraise=(OverflowError,), tryfold=True),
+ # ^^^ more efficient version when 2nd arg is nonneg
+ 'int_sub_ovf': LLOp(canraise=(OverflowError,), tryfold=True),
+ 'int_mul_ovf': LLOp(canraise=(OverflowError,), tryfold=True),
+
'uint_is_true': LLOp(canfold=True),
'uint_invert': LLOp(canfold=True),
diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py
--- a/rpython/rtyper/rint.py
+++ b/rpython/rtyper/rint.py
@@ -219,21 +219,21 @@
hop = hop.copy()
hop.swap_fst_snd_args()
func = 'add_nonneg_ovf'
- return _rtype_call_helper(hop, func)
+ return _rtype_template(hop, func)
def rtype_sub(_, hop):
return _rtype_template(hop, 'sub')
rtype_inplace_sub = rtype_sub
def rtype_sub_ovf(_, hop):
- return _rtype_call_helper(hop, 'sub_ovf')
+ return _rtype_template(hop, 'sub_ovf')
def rtype_mul(_, hop):
return _rtype_template(hop, 'mul')
rtype_inplace_mul = rtype_mul
def rtype_mul_ovf(_, hop):
- return _rtype_call_helper(hop, 'mul_ovf')
+ return _rtype_template(hop, 'mul_ovf')
def rtype_floordiv(_, hop):
return _rtype_call_helper(hop, 'floordiv', [ZeroDivisionError])
@@ -307,9 +307,6 @@
"""Write a simple operation implementing the given 'func'.
It must be an operation that cannot raise.
"""
- if '_ovf' in func or func.startswith(('mod', 'floordiv')):
- raise TyperError("%r should not be used here any more" % (func,))
-
r_result = hop.r_result
if r_result.lowleveltype == Bool:
repr = signed_repr
@@ -320,9 +317,17 @@
else:
repr2 = repr
vlist = hop.inputargs(repr, repr2)
- hop.exception_cannot_occur()
+ prefix = repr.opprefix
- prefix = repr.opprefix
+ if '_ovf' in func or func.startswith(('mod', 'floordiv')):
+ if prefix+func not in ('int_add_ovf', 'int_add_nonneg_ovf',
+ 'int_sub_ovf', 'int_mul_ovf'):
+ raise TyperError("%r should not be used here any more" % (func,))
+ hop.has_implicit_exception(OverflowError)
+ hop.exception_is_here()
+ else:
+ hop.exception_cannot_occur()
+
v_res = hop.genop(prefix+func, vlist, resulttype=repr)
v_res = hop.llops.convertvar(v_res, repr, r_result)
return v_res
@@ -533,59 +538,6 @@
return ll_lllong_mod(x, y)
-# ---------- add, sub, mul ----------
-
-@jit.oopspec("int.add_ovf(x, y)")
-def ll_int_add_ovf(x, y):
- r = intmask(r_uint(x) + r_uint(y))
- if r^x < 0 and r^y < 0:
- raise OverflowError("integer addition")
- return r
-
-@jit.oopspec("int.add_ovf(x, y)")
-def ll_int_add_nonneg_ovf(x, y): # y can be assumed >= 0
- r = intmask(r_uint(x) + r_uint(y))
- if r < x:
- raise OverflowError("integer addition")
- return r
-
-@jit.oopspec("int.sub_ovf(x, y)")
-def ll_int_sub_ovf(x, y):
- r = intmask(r_uint(x) - r_uint(y))
- if r^x < 0 and r^~y < 0:
- raise OverflowError("integer subtraction")
- return r
-
-@jit.oopspec("int.mul_ovf(a, b)")
-def ll_int_mul_ovf(a, b):
- if INT_BITS_1 < LLONG_BITS_1:
- rr = r_longlong(a) * r_longlong(b)
- r = intmask(rr)
- if r_longlong(r) != rr:
- raise OverflowError("integer multiplication")
- return r
- else:
- longprod = intmask(a * b)
- doubleprod = float(a) * float(b)
- doubled_longprod = float(longprod)
-
- # Fast path for normal case: small multiplicands, and no info
- # is lost in either method.
- if doubled_longprod == doubleprod:
- return longprod
-
- # Somebody somewhere lost info. Close enough, or way off? Note
- # that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0).
- # The difference either is or isn't significant compared to the
- # true value (of which doubleprod is a good approximation).
- # absdiff/absprod <= 1/32 iff 32 * absdiff <= absprod -- 5 good
- # bits is "close enough"
- if 32.0 * abs(doubled_longprod - doubleprod) <= abs(doubleprod):
- return longprod
-
- raise OverflowError("integer multiplication")
-
-
# ---------- lshift, neg, abs ----------
def ll_int_lshift_ovf(x, y):
diff --git a/rpython/rtyper/test/test_rint.py b/rpython/rtyper/test/test_rint.py
--- a/rpython/rtyper/test/test_rint.py
+++ b/rpython/rtyper/test/test_rint.py
@@ -6,6 +6,7 @@
from rpython.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between
from rpython.rlib import objectmodel
from rpython.rtyper.test.tool import BaseRtypingTest
+from rpython.flowspace.model import summary
class TestSnippet(object):
@@ -380,6 +381,8 @@
except OverflowError:
return 1
return a
+ t, rtyper, graph = self.gengraph(f, [int])
+ assert summary(graph).get('int_add_nonneg_ovf') == 2
res = self.interpret(f, [-3])
assert res == 144
res = self.interpret(f, [sys.maxint-50])
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py
--- a/rpython/translator/c/genc.py
+++ b/rpython/translator/c/genc.py
@@ -800,6 +800,7 @@
srcdir / 'debug_traceback.c', # ifdef HAVE_RTYPER
srcdir / 'asm.c',
srcdir / 'instrument.c',
+ srcdir / 'int.c',
srcdir / 'stack.c',
srcdir / 'threadlocal.c',
]
diff --git a/rpython/translator/c/src/exception.c b/rpython/translator/c/src/exception.c
--- a/rpython/translator/c/src/exception.c
+++ b/rpython/translator/c/src/exception.c
@@ -32,6 +32,13 @@
RPyClearException(); \
} while (0)
+/* implementations */
+
+void _RPyRaiseSimpleException(RPYTHON_EXCEPTION rexc)
+{
+ RPyRaiseException(RPYTHON_TYPE_OF_EXC_INST(rexc), rexc);
+}
+
/******************************************************************/
#endif /* HAVE_RTYPER */
diff --git a/rpython/translator/c/src/exception.h b/rpython/translator/c/src/exception.h
--- a/rpython/translator/c/src/exception.h
+++ b/rpython/translator/c/src/exception.h
@@ -35,4 +35,9 @@
RPyClearException(); \
} while (0)
+/* prototypes */
+
+RPY_EXTERN
+void _RPyRaiseSimpleException(RPYTHON_EXCEPTION rexc);
+
#endif
diff --git a/rpython/translator/c/src/int.c b/rpython/translator/c/src/int.c
new file mode 100644
--- /dev/null
+++ b/rpython/translator/c/src/int.c
@@ -0,0 +1,45 @@
+#include "common_header.h"
+#include "structdef.h"
+#include "forwarddecl.h"
+#include "preimpl.h"
+#include
+#include
+#include
+
+/* adjusted from intobject.c, Python 2.3.3 */
+
+long long op_llong_mul_ovf(long long a, long long b)
+{
+ double doubled_longprod; /* (double)longprod */
+ double doubleprod; /* (double)a * (double)b */
+ long long longprod;
+
+ longprod = a * b;
+ doubleprod = (double)a * (double)b;
+ doubled_longprod = (double)longprod;
+
+ /* Fast path for normal case: small multiplicands, and no info
+ is lost in either method. */
+ if (doubled_longprod == doubleprod)
+ return longprod;
+
+ /* Somebody somewhere lost info. Close enough, or way off? Note
+ that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0).
+ The difference either is or isn't significant compared to the
+ true value (of which doubleprod is a good approximation).
+ */
+ {
+ const double diff = doubled_longprod - doubleprod;
+ const double absdiff = diff >= 0.0 ? diff : -diff;
+ const double absprod = doubleprod >= 0.0 ? doubleprod :
+ -doubleprod;
+ /* absdiff/absprod <= 1/32 iff
+ 32 * absdiff <= absprod -- 5 good bits is "close enough" */
+ if (32.0 * absdiff <= absprod)
+ return longprod;
+
+ FAIL_OVF("integer multiplication");
+ return -1;
+ }
+}
+
diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h
--- a/rpython/translator/c/src/int.h
+++ b/rpython/translator/c/src/int.h
@@ -45,9 +45,36 @@
/* addition, subtraction */
#define OP_INT_ADD(x,y,r) r = (x) + (y)
+
+/* cast to avoid undefined behaviour on overflow */
+#define OP_INT_ADD_OVF(x,y,r) \
+ r = (Signed)((Unsigned)x + y); \
+ if ((r^x) < 0 && (r^y) < 0) FAIL_OVF("integer addition")
+
+#define OP_INT_ADD_NONNEG_OVF(x,y,r) /* y can be assumed >= 0 */ \
+ r = (Signed)((Unsigned)x + y); \
+ if ((r&~x) < 0) FAIL_OVF("integer addition")
+
#define OP_INT_SUB(x,y,r) r = (x) - (y)
+
+#define OP_INT_SUB_OVF(x,y,r) \
+ r = (Signed)((Unsigned)x - y); \
+ if ((r^x) < 0 && (r^~y) < 0) FAIL_OVF("integer subtraction")
+
#define OP_INT_MUL(x,y,r) r = (x) * (y)
+#if SIZEOF_LONG * 2 <= SIZEOF_LONG_LONG
+#define OP_INT_MUL_OVF(x,y,r) \
+ { \
+ long long _lr = (long long)x * y; \
+ r = (long)_lr; \
+ if (_lr != (long long)r) FAIL_OVF("integer multiplication"); \
+ }
+#else
+#define OP_INT_MUL_OVF(x,y,r) \
+ r = op_llong_mul_ovf(x, y) /* long == long long */
+#endif
+
/* shifting */
/* NB. shifting has same limitations as C: the shift count must be
diff --git a/rpython/translator/c/src/support.h b/rpython/translator/c/src/support.h
--- a/rpython/translator/c/src/support.h
+++ b/rpython/translator/c/src/support.h
@@ -8,6 +8,8 @@
#define RUNNING_ON_LLINTERP 0
#define OP_JIT_RECORD_EXACT_CLASS(i, c, r) /* nothing */
+#define FAIL_OVF(msg) _RPyRaiseSimpleException(RPyExc_OverflowError)
+
/* Extra checks can be enabled with the RPY_ASSERT or RPY_LL_ASSERT
* macros. They differ in the level at which the tests are made.
* Remember that RPython lists, for example, are implemented as a
From pypy.commits at gmail.com Wed May 11 03:33:43 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 00:33:43 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: merge heads
Message-ID: <5732e057.4e981c0a.2c4dc.530c@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84366:5bca3ef7ba7c
Date: 2016-05-11 09:33 +0200
http://bitbucket.org/pypy/pypy/changeset/5bca3ef7ba7c/
Log: merge heads
diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py
--- a/pypy/module/__pypy__/interp_intop.py
+++ b/pypy/module/__pypy__/interp_intop.py
@@ -2,6 +2,19 @@
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.rarithmetic import r_uint, intmask
+from rpython.rlib import jit
+
+
+# XXX maybe temporary: hide llop.int_{floordiv,mod} from the JIT,
+# because now it expects only Python-style divisions, not the
+# C-style divisions of these two ll operations
+@jit.dont_look_inside
+def _int_floordiv(n, m):
+ return llop.int_floordiv(lltype.Signed, n, m)
+
+@jit.dont_look_inside
+def _int_mod(n, m):
+ return llop.int_mod(lltype.Signed, n, m)
@unwrap_spec(n=int, m=int)
@@ -18,11 +31,11 @@
@unwrap_spec(n=int, m=int)
def int_floordiv(space, n, m):
- return space.wrap(llop.int_floordiv(lltype.Signed, n, m))
+ return space.wrap(_int_floordiv(n, m))
@unwrap_spec(n=int, m=int)
def int_mod(space, n, m):
- return space.wrap(llop.int_mod(lltype.Signed, n, m))
+ return space.wrap(_int_mod(n, m))
@unwrap_spec(n=int, m=int)
def int_lshift(space, n, m):
From pypy.commits at gmail.com Wed May 11 03:33:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 00:33:39 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: hg merge default
Message-ID: <5732e053.6322c20a.6135f.2fd2@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84364:734a91c841ee
Date: 2016-05-11 08:24 +0200
http://bitbucket.org/pypy/pypy/changeset/734a91c841ee/
Log: hg merge default
diff too long, truncating to 2000 out of 37072 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -21,3 +21,4 @@
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
if __name__ == '__main__':
if len(sys.argv) != 2:
+ if len(sys.argv) == 1:
+ # start locally
+ import sshgraphserver
+ sshgraphserver.ssh_graph_server(['LOCAL'])
+ sys.exit(0)
print >> sys.stderr, __doc__
sys.exit(2)
if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
Usage:
sshgraphserver.py hostname [more args for ssh...]
+ sshgraphserver.py LOCAL
This logs in to 'hostname' by passing the arguments on the command-line
to ssh. No further configuration is required: it works for all programs
using the dotviewer library as long as they run on 'hostname' under the
same username as the one sshgraphserver logs as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
"""
import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
s1 = socket.socket()
s1.bind(('127.0.0.1', socket.INADDR_ANY))
localhost, localport = s1.getsockname()
- remoteport = random.randrange(10000, 20000)
- # ^^^ and just hope there is no conflict
- args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
- args = args + sshargs + ['python -u -c "exec input()"']
- print ' '.join(args[:-1])
+ if sshargs[0] != 'LOCAL':
+ remoteport = random.randrange(10000, 20000)
+ # ^^^ and just hope there is no conflict
+
+ args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+ remoteport, localport)]
+ args = args + sshargs + ['python -u -c "exec input()"']
+ else:
+ remoteport = localport
+ args = ['python', '-u', '-c', 'exec input()']
+
+ print ' '.join(args)
p = subprocess.Popen(args, bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "next" : stop}),
- ("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
("__format__", format, format_impl, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
+ if test_support.check_impl_detail():
+ specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
raise MyException
for name, runner, meth_impl, ok, env in specials:
- if name == '__length_hint__' or name == '__sizeof__':
- if not test_support.check_impl_detail():
- continue
-
class X(Checker):
pass
for attr, obj in env.iteritems():
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
+ 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after ` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create an integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -397,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
@@ -993,6 +998,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -204,15 +204,6 @@
BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
default=False),
- BoolOption("withprebuiltchar",
- "use prebuilt single-character string objects",
- default=False),
-
- BoolOption("sharesmallstr",
- "always reuse the prebuilt string objects "
- "(the empty string and potentially single-char strings)",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
@@ -222,39 +213,14 @@
default=False,
requires=[("objspace.honor__builtins__", False)]),
- BoolOption("withmapdict",
- "make instances really small but slow without the JIT",
- default=False,
- requires=[("objspace.std.getattributeshortcut", True),
- ("objspace.std.withtypeversion", True),
- ]),
-
- BoolOption("withrangelist",
- "enable special range list implementation that does not "
- "actually create the full list until the resulting "
- "list is mutated",
- default=False),
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
- BoolOption("withtypeversion",
- "version type objects when changing them",
- cmdline=None,
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
-
- BoolOption("withmethodcache",
- "try to cache method lookups",
- default=False,
- requires=[("objspace.std.withtypeversion", True),
- ("translation.rweakref", True)]),
BoolOption("withmethodcachecounter",
"try to cache methods and provide a counter in __pypy__. "
"for testing purposes only.",
- default=False,
- requires=[("objspace.std.withmethodcache", True)]),
+ default=False),
IntOption("methodcachesizeexp",
" 2 ** methodcachesizeexp is the size of the of the method cache ",
default=11),
@@ -265,22 +231,10 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("getattributeshortcut",
- "track types that override __getattribute__",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
BoolOption("newshortcut",
"cache and shortcut calling __new__ from builtin types",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
+ default=False),
- BoolOption("withidentitydict",
- "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
]),
])
@@ -296,15 +250,10 @@
"""
# all the good optimizations for PyPy should be listed here
if level in ['2', '3', 'jit']:
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withmethodcache=True)
- config.objspace.std.suggest(withprebuiltchar=True)
config.objspace.std.suggest(intshortcut=True)
config.objspace.std.suggest(optimized_list_getitem=True)
- config.objspace.std.suggest(getattributeshortcut=True)
#config.objspace.std.suggest(newshortcut=True)
config.objspace.std.suggest(withspecialisedtuple=True)
- config.objspace.std.suggest(withidentitydict=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
@@ -317,16 +266,13 @@
# memory-saving optimizations
if level == 'mem':
config.objspace.std.suggest(withprebuiltint=True)
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withprebuiltchar=True)
- config.objspace.std.suggest(withmapdict=True)
+ config.objspace.std.suggest(withliststrategies=True)
if not IS_64_BITS:
config.objspace.std.suggest(withsmalllong=True)
# extra optimizations with the JIT
if level == 'jit':
config.objspace.std.suggest(withcelldict=True)
- config.objspace.std.suggest(withmapdict=True)
def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
assert conf.objspace.usemodules.gc
- conf.objspace.std.withmapdict = True
- assert conf.objspace.std.withtypeversion
- conf = get_pypy_config()
- conf.objspace.std.withtypeversion = False
- py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
def test_conflicting_gcrootfinder():
conf = get_pypy_config()
conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
def test_set_pypy_opt_level():
conf = get_pypy_config()
set_pypy_opt_level(conf, '2')
- assert conf.objspace.std.getattributeshortcut
+ assert conf.objspace.std.intshortcut
conf = get_pypy_config()
set_pypy_opt_level(conf, '0')
- assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
- conf = get_pypy_config()
- conf.translation.rweakref = False
- set_pypy_opt_level(conf, '3')
-
- assert not conf.objspace.std.withtypeversion
- assert not conf.objspace.std.withmethodcache
+ assert not conf.objspace.std.intshortcut
def test_check_documentation():
def check_file_exists(fn):
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -108,9 +108,9 @@
On Fedora::
- yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
- lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
- (XXX plus the Febora version of libgdbm-dev and tk-dev)
+ dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+ gdbm-devel
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withrangelist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Enable "range list" objects. They are an additional implementation of the Python
-``list`` type, indistinguishable for the normal user. Whenever the ``range``
-builtin is called, an range list is returned. As long as this list is not
-mutated (and for example only iterated over), it uses only enough memory to
-store the start, stop and step of the range. This makes using ``range`` as
-efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
-
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
The work on the cling backend has so far been done only for CPython, but
bringing it to PyPy is a lot less work than developing it in the first place.
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
.. _llvm: http://llvm.org/
.. _clang: http://clang.llvm.org/
@@ -283,7 +283,8 @@
core reflection set, but for the moment assume we want to have it in the
reflection library that we are building for this example.
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries")
which is a simple XML file specifying, either explicitly or by using a
pattern, which classes, variables, namespaces, etc. to select from the given
header file.
@@ -305,7 +306,7 @@
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
Now the reflection info can be generated and compiled::
@@ -811,7 +812,7 @@
immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
variable.
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
There are a couple of minor differences between PyCintex and cppyy, most to do
with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts `
+:source:`pypy/goal/` our main PyPy-translation scripts
live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,127 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``. To
+make it possible, the RPython interface is now the following one (from
+May 2016):
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``. These are called
+ immediately by the GC when the last reference to the object goes
+ away, like in CPython. However, the long-term goal is that all
+ ``__del__()`` methods should only contain simple enough code. If
+ they do, we call them "destructors". They can't use operations that
+ would resurrect the object, for example. Use the decorator
+ ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` that are not passing the destructor test
+ are supported for backward compatibility, but deprecated. The rest
+ of this document assumes that ``__del__()`` are all destructors.
+
+* For any more advanced usage --- in particular for any app-level
+ object with a __del__ --- we don't use the RPython-level
+ ``__del__()`` method. Instead we use
+ ``rgc.FinalizerController.register_finalizer()``. This allows us to
+ attach a finalizer method to the object, giving more control over
+ the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer. A finalizer runs earlier, and in topological
+order; care must be taken that the object might still be reachable at
+this point if we're clever enough. A destructor on the other hand runs
+last; nothing can be done with the object any more, and the GC frees it
+immediately.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when it is about to free the memory. Intended for objects
+that just need to free an extra block of raw memory.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it. These restrictions are
+checked. In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
+
+Destructors are called precisely when the GC frees the memory of the
+object. As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerQueue`` class::
+
+* You must give a class-level attribute ``base_class``, which is the
+ base class of all instances with a finalizer. (If you need
+ finalizers on several unrelated classes, you need several unrelated
+ ``FinalizerQueue`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``. At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer. Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and mark them as
+reachable again, as well as all objects they depend on. It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance. Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes). If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly. It
+returns the next queued item, or ``None`` when the queue is empty.
+
+In theory, it would kind of work if you cumulate several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues. This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
+
+Note that the Boehm garbage collector, used in ``rpython -O0``,
+completely ignores ``register_finalizer()``.
+
+
+Ordering of finalizers
+----------------------
+
+After a collection, the MiniMark GC should call the finalizers on
*some* of the objects that have one and that have become unreachable.
Basically, if there is a reference chain from an object a to an object b
then it should not call the finalizer for b immediately, but just keep b
alive and try again to call its finalizer after the next collection.
-This basic idea fails when there are cycles. It's not a good idea to
+(Note that this creates rare but annoying issues as soon as the program
+creates chains of objects with finalizers more quickly than the rate at
+which major collections go (which is very slow). In August 2013 we tried
+instead to call all finalizers of all objects found unreachable at a major
+collection. That branch, ``gc-del``, was never merged. It is still
+unclear what the real consequences would be on programs in the wild.)
+
+The basic idea fails in the presence of cycles. It's not a good idea to
keep the objects alive forever or to never call any of the finalizers.
The model we came up with is that in this case, we could just call the
finalizer of one of the objects in the cycle -- but only, of course, if
@@ -33,6 +141,7 @@
detach the finalizer (so that it's not called more than once)
call the finalizer
+
Algorithm
---------
@@ -136,28 +245,8 @@
that doesn't change the state of an object, we don't follow its children
recursively.
-In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode
-the 4 states with a single extra bit in the header:
-
- ===== ============= ======== ====================
- state is_forwarded? bit set? bit set in the copy?
- ===== ============= ======== ====================
- 0 no no n/a
- 1 no yes n/a
- 2 yes yes yes
- 3 yes whatever no
- ===== ============= ======== ====================
-
-So the loop above that does the transition from state 1 to state 2 is
-really just a copy(x) followed by scan_copied(). We must also clear the
-bit in the copy at the end, to clean up before the next collection
-(which means recursively bumping the state from 2 to 3 in the final
-loop).
-
-In the MiniMark GC, the objects don't move (apart from when they are
-copied out of the nursery), but we use the flag GCFLAG_VISITED to mark
-objects that survive, so we can also have a single extra bit for
-finalizers:
+In practice, in the MiniMark GCs, we can encode
+the 4 states with a combination of two bits in the header:
===== ============== ============================
state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING
@@ -167,3 +256,8 @@
2 yes yes
3 yes no
===== ============== ============================
+
+So the loop above that does the transition from state 1 to state 2 is
+really just a recursive visit. We must also clear the
+FINALIZATION_ORDERING bit at the end (state 2 to state 3) to clean up
+before the next collection.
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
discussion/improve-rpython
discussion/ctypes-implementation
discussion/jit-profiler
+ discussion/rawrefcount
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -79,7 +79,7 @@
:doc:`Full details ` are `available here `.
.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
RPython Mixed Modules
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,20 +106,33 @@
For information on which third party extensions work (or do not work)
with PyPy see the `compatibility wiki`_.
+For more information about how we manage refcounting semantics see
+rawrefcount_
+
.. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
.. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html
On which platforms does PyPy run?
---------------------------------
-PyPy is regularly and extensively tested on Linux machines. It mostly
+PyPy currently supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+PyPy is regularly and extensively tested on Linux machines. It
works on Mac and Windows: it is tested there, but most of us are running
-Linux so fixes may depend on 3rd-party contributions. PyPy's JIT
-works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7).
-Support for POWER (64-bit) is stalled at the moment.
+Linux so fixes may depend on 3rd-party contributions.
-To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or
+To bootstrap from sources, PyPy can use either CPython 2.7 or
another (e.g. older) PyPy. Cross-translation is not really supported:
e.g. to build a 32-bit PyPy, you need to have a 32-bit environment.
Cross-translation is only explicitly supported between a 32-bit Intel
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.1.1.rst
release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
Dictionary Optimizations
~~~~~~~~~~~~~~~~~~~~~~~~
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
-Multi-dicts are a special implementation of dictionaries. It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime. Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that make it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled, by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
-This is now the default implementation of dictionaries in the Python interpreter.
+Identity Dicts
++++++++++++++++
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with
+new-style classes.
+
+
+Map Dicts
+++++++++++++
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually memory behaviour
+should be mostly like that of using ``__slots__``).
The idea is the following: Most instances of the same class have very similar
attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
dicts:
the representation of the instance dict contains only a list of values.
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
List Optimizations
@@ -114,8 +120,8 @@
created. This gives the memory and speed behaviour of ``xrange`` and the generality
of use of ``range``, and makes ``xrange`` essentially useless.
-You can enable this feature with the :config:`objspace.std.withrangelist`
-option.
+This feature is enabled by default as part of the
+:config:`objspace.std.withliststrategies` option.
User Class Optimizations
@@ -133,8 +139,7 @@
base classes is changed). On subsequent lookups the cached version can be used,
as long as the instance did not shadow any of its classes attributes.
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
Interpreter Optimizations
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released a bugfix for PyPy 5.1, due to a regression_ in
+installing third-party packages dependent on numpy (using our numpy fork
+available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy which increased the memory required to translate. Improvement
+will be noticed by downstream packagers and those who translate rather than
+download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
help="output format")
options, args = parser.parse_args()
if len(args) != 1:
- raise ValueError, "need exactly one argument"
+ raise ValueError("need exactly one argument")
epsfile = process_dot(py.path.local(args[0]))
if options.format == "ps" or options.format == "eps":
print epsfile.read()
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -10,3 +10,82 @@
.. branch: gcheader-decl
Reduce the size of generated C sources.
+
+
+.. branch: remove-objspace-options
+
+Remove a number of options from the build process that were never tested and
+never set. Fix a performance bug in the method cache.
+
+.. branch: bitstring
+
+JIT: use bitstrings to compress the lists of read or written descrs
+that we attach to EffectInfo. Fixes a problem we had in
+remove-objspace-options.
+
+.. branch: cpyext-for-merge
+
+Update cpyext C-API support. After this branch, we are almost able to support
+upstream numpy via cpyext, so we created (yet another) fork of numpy at
+github.com/pypy/numpy with the needed changes. Among the significant changes
+to cpyext:
+ - allow c-snippet tests to be run with -A so we can verify we are compatible
+ - fix many edge cases exposed by fixing tests to run with -A
+ - issequence() logic matches cpython
+ - make PyStringObject and PyUnicodeObject field names compatible with cpython
+ - add preliminary support for PyDateTime_*
+ - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy,
+ PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile,
+ - PyAnySet_CheckExact, PyUnicode_Concat
+ - improve support for PyGILState_Ensure, PyGILState_Release, and thread
+ primitives, also find a case where CPython will allow thread creation
+ before PyEval_InitThreads is run, disallow on PyPy
+ - create a PyObject-specific list strategy
+ - rewrite slot assignment for typeobjects
+ - improve tracking of PyObject to rpython object mapping
+ - support tp_as_{number, sequence, mapping, buffer} slots
+
+(makes the pypy-c bigger; this was fixed subsequently by the
+share-cpyext-cpython-api branch)
+
+.. branch: share-mapdict-methods-2
+
+Reduce generated code for subclasses by using the same function objects in all
+generated subclasses.
+
+.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL. Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
+
+.. branch: cpyext-test-A
+
+Get the cpyext tests to pass with "-A" (i.e. when tested directly with
+CPython).
+
+.. branch: oefmt
+
+.. branch: cpyext-werror
+
+Compile c snippets with -Werror in cpyext
+
+.. branch: gc-del-3
+
+Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-order.rst.
+It is a more flexible way to make RPython finalizers.
+
+.. branch: unpacking-cpython-shortcut
+
+.. branch: cleanups
+
+.. branch: cpyext-more-slots
+
+.. branch: use-gc-del-3
+
+Use the new rgc.FinalizerQueue mechanism to clean up the handling of
+``__del__`` methods. Fixes notably issue #2287. (All RPython
+subclasses of W_Root need to use FinalizerQueue now.)
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -63,7 +63,7 @@
## from pypy.interpreter import main, interactive, error
## con = interactive.PyPyConsole(space)
## con.interact()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -71,7 +71,7 @@
finally:
try:
space.finish()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -115,7 +115,7 @@
space.wrap('__import__'))
space.call_function(import_, space.wrap('site'))
return rffi.cast(rffi.INT, 0)
- except OperationError, e:
+ except OperationError as e:
if verbose:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
@@ -167,7 +167,7 @@
sys._pypy_execute_source.append(glob)
exec stmt in glob
""")
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -78,7 +78,11 @@
"""
try:
# run it
- f(*fargs, **fkwds)
+ try:
+ f(*fargs, **fkwds)
+ finally:
+ sys.settrace(None)
+ sys.setprofile(None)
# we arrive here if no exception is raised. stdout cosmetics...
try:
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -84,7 +84,7 @@
space = self.space
try:
args_w = space.fixedview(w_stararg)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"argument after * must be a sequence, not %T",
@@ -111,7 +111,7 @@
else:
try:
w_keys = space.call_method(w_starstararg, "keys")
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
raise oefmt(space.w_TypeError,
"argument after ** must be a mapping, not %T",
@@ -134,11 +134,11 @@
"""The simplest argument parsing: get the 'argcount' arguments,
or raise a real ValueError if the length is wrong."""
if self.keywords:
- raise ValueError, "no keyword arguments expected"
+ raise ValueError("no keyword arguments expected")
if len(self.arguments_w) > argcount:
- raise ValueError, "too many arguments (%d expected)" % argcount
+ raise ValueError("too many arguments (%d expected)" % argcount)
elif len(self.arguments_w) < argcount:
- raise ValueError, "not enough arguments (%d expected)" % argcount
+ raise ValueError("not enough arguments (%d expected)" % argcount)
return self.arguments_w
def firstarg(self):
@@ -279,7 +279,7 @@
try:
self._match_signature(w_firstarg,
scope_w, signature, defaults_w, 0)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
return signature.scope_length()
@@ -301,7 +301,7 @@
"""
try:
return self._parse(w_firstarg, signature, defaults_w, blindargs)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
@staticmethod
@@ -352,11 +352,9 @@
for w_key in keys_w:
try:
key = space.str_w(w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("keywords must be strings"))
+ raise oefmt(space.w_TypeError, "keywords must be strings")
if e.match(space, space.w_UnicodeEncodeError):
# Allow this to pass through
key = None
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -16,8 +16,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -115,16 +115,16 @@
def check_forbidden_name(self, name, node):
try:
misc.check_forbidden_name(name)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error("cannot assign to %s" % (e.name,), node)
def set_context(self, expr, ctx):
"""Set the context of an expression to Store or Del if possible."""
try:
expr.set_context(ctx)
- except ast.UnacceptableExpressionContext, e:
+ except ast.UnacceptableExpressionContext as e:
self.error_ast(e.msg, e.node)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_print_stmt(self, print_node):
@@ -1080,7 +1080,7 @@
return self.space.call_function(tp, w_num_str)
try:
return self.space.call_function(self.space.w_int, w_num_str, w_base)
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(self.space, self.space.w_ValueError):
raise
return self.space.call_function(self.space.w_float, w_num_str)
@@ -1100,7 +1100,7 @@
sub_strings_w = [parsestring.parsestr(space, encoding, atom_node.get_child(i).get_value(),
unicode_literals)
for i in range(atom_node.num_children())]
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(space, space.w_UnicodeError):
raise
# UnicodeError in literal: turn into SyntaxError
diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py
--- a/pypy/interpreter/astcompiler/symtable.py
+++ b/pypy/interpreter/astcompiler/symtable.py
@@ -325,7 +325,7 @@
try:
module.walkabout(self)
top.finalize(None, {}, {})
- except SyntaxError, e:
+ except SyntaxError as e:
e.filename = compile_info.filename
raise
self.pop_scope()
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -705,7 +705,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
@@ -717,7 +717,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
@@ -969,7 +969,7 @@
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
- except AssertionError, e:
+ except AssertionError as e:
assert str(e) == "(3,)"
# BUILD_LIST_FROM_ARG is PyPy specific
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -96,7 +96,7 @@
def t_default(self, s):
r" . +"
- raise ValueError, "unmatched input: %s" % `s`
+ raise ValueError("unmatched input: %s" % `s`)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
@@ -377,7 +377,7 @@
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
- except ASDLSyntaxError, err:
+ except ASDLSyntaxError as err:
print err
lines = buf.split("\n")
print lines[err.lineno - 1] # lines starts at 0, files at 1
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -399,8 +399,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -11,7 +11,7 @@
INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
- UserDelAction)
+ make_finalizer_queue)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
@@ -28,6 +28,7 @@
"""This is the abstract root class of all wrapped objects that live
in a 'normal' object space like StdObjSpace."""
__slots__ = ('__weakref__',)
+ _must_be_light_finalizer_ = True
user_overridden_class = False
def getdict(self, space):
@@ -52,7 +53,7 @@
try:
space.delitem(w_dict, space.wrap(attr))
return True
- except OperationError, ex:
+ except OperationError as ex:
if not ex.match(space, space.w_KeyError):
raise
return False
@@ -67,8 +68,8 @@
return space.gettypeobject(self.typedef)
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("__class__ assignment: only for heap types"))
+ raise oefmt(space.w_TypeError,
+ "__class__ assignment: only for heap types")
def user_setup(self, space, w_subtype):
raise NotImplementedError("only for interp-level user subclasses "
@@ -77,7 +78,7 @@
def getname(self, space):
try:
return space.str_w(space.getattr(self, space.wrap('__name__')))
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError):
return '?'
raise
@@ -136,9 +137,8 @@
pass
def clear_all_weakrefs(self):
- """Call this at the beginning of interp-level __del__() methods
- in subclasses. It ensures that weakrefs (if any) are cleared
- before the object is further destroyed.
+ """Ensures that weakrefs (if any) are cleared now. This is
+ called by UserDelAction before the object is finalized further.
"""
lifeline = self.getweakref()
if lifeline is not None:
@@ -151,25 +151,37 @@
self.delweakref()
lifeline.clear_all_weakrefs()
- __already_enqueued_for_destruction = ()
+ def _finalize_(self):
+ """The RPython-level finalizer.
- def enqueue_for_destruction(self, space, callback, descrname):
- """Put the object in the destructor queue of the space.
- At a later, safe point in time, UserDelAction will call
- callback(self). If that raises OperationError, prints it
- to stderr with the descrname string.
+ By default, it is *not called*. See self.register_finalizer().
+ Be ready to handle the case where the object is only half
+ initialized. Also, in some cases the object might still be
+ visible to app-level after _finalize_() is called (e.g. if
+ there is a __del__ that resurrects).
+ """
- Note that 'callback' will usually need to start with:
- assert isinstance(self, W_SpecificClass)
+ def register_finalizer(self, space):
+ """Register a finalizer for this object, so that
+ self._finalize_() will be called. You must call this method at
+ most once. Be ready to handle in _finalize_() the case where
+ the object is half-initialized, even if you only call
+ self.register_finalizer() at the end of the initialization.
+ This is because there are cases where the finalizer is already
+ registered before: if the user makes an app-level subclass with
+ a __del__. (In that case only, self.register_finalizer() does
+ nothing, because the finalizer is already registered in
+ allocate_instance().)
"""
- # this function always resurect the object, so when
- # running on top of CPython we must manually ensure that
- # we enqueue it only once
- if not we_are_translated():
- if callback in self.__already_enqueued_for_destruction:
- return
- self.__already_enqueued_for_destruction += (callback,)
- space.user_del_action.register_callback(self, callback, descrname)
+ if self.user_overridden_class and self.getclass(space).hasuserdel:
+ # already registered by space.allocate_instance()
+ if not we_are_translated():
+ assert space.finalizer_queue._already_registered(self)
+ else:
+ if not we_are_translated():
+ # does not make sense if _finalize_ is not overridden
+ assert self._finalize_.im_func is not W_Root._finalize_.im_func
+ space.finalizer_queue.register_finalizer(self)
# hooks that the mapdict implementations needs:
def _get_mapdict_map(self):
@@ -318,7 +330,7 @@
space = self.space
try:
return space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise StopIteration
@@ -389,9 +401,9 @@
self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
- self.user_del_action = UserDelAction(self)
+ make_finalizer_queue(W_Root, self)
self._code_of_sys_exc_info = None
-
+
# can be overridden to a subclass
self.initialize()
@@ -406,7 +418,7 @@
self.sys.get('builtin_module_names')):
try:
w_mod = self.getitem(w_modules, w_modname)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
continue
raise
@@ -440,7 +452,7 @@
try:
self.call_method(w_mod, "_shutdown")
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self, "threading._shutdown()")
def __repr__(self):
@@ -476,7 +488,7 @@
assert reuse
try:
return self.getitem(w_modules, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_KeyError):
raise
@@ -706,8 +718,7 @@
try:
return rthread.allocate_lock()
except rthread.error:
- raise OperationError(self.w_RuntimeError,
- self.wrap("out of resources"))
+ raise oefmt(self.w_RuntimeError, "out of resources")
# Following is a friendly interface to common object space operations
# that can be defined in term of more primitive ones. Subclasses
@@ -764,7 +775,7 @@
def finditem(self, w_obj, w_key):
try:
return self.getitem(w_obj, w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
return None
raise
@@ -772,7 +783,7 @@
def findattr(self, w_object, w_name):
try:
return self.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
# a PyPy extension: let SystemExit and KeyboardInterrupt go through
if e.async(self):
raise
@@ -872,7 +883,7 @@
items=items)
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -896,13 +907,12 @@
while True:
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
if idx == expected_length:
- raise OperationError(self.w_ValueError,
- self.wrap("too many values to unpack"))
+ raise oefmt(self.w_ValueError, "too many values to unpack")
items[idx] = w_item
idx += 1
if idx < expected_length:
@@ -942,7 +952,7 @@
"""
try:
return self.len_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -952,7 +962,7 @@
return default
try:
w_hint = self.get_and_call_function(w_descr, w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -962,8 +972,8 @@
hint = self.int_w(w_hint)
if hint < 0:
- raise OperationError(self.w_ValueError, self.wrap(
- "__length_hint__() should return >= 0"))
+ raise oefmt(self.w_ValueError,
+ "__length_hint__() should return >= 0")
return hint
def fixedview(self, w_iterable, expected_length=-1):
@@ -1049,7 +1059,7 @@
else:
return False
return self.exception_issubclass_w(w_exc_type, w_check_class)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_TypeError): # string exceptions maybe
return False
raise
@@ -1167,7 +1177,7 @@
try:
self.getattr(w_obj, self.wrap("__call__"))
return self.w_True
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_AttributeError):
raise
return self.w_False
@@ -1176,7 +1186,27 @@
return self.w_False
def issequence_w(self, w_obj):
- return (self.findattr(w_obj, self.wrap("__getitem__")) is not None)
+ if self.is_oldstyle_instance(w_obj):
+ return (self.findattr(w_obj, self.wrap('__getitem__')) is not None)
+ flag = self.type(w_obj).flag_map_or_seq
+ if flag == 'M':
+ return False
+ elif flag == 'S':
+ return True
+ else:
+ return (self.lookup(w_obj, '__getitem__') is not None)
+
+ def ismapping_w(self, w_obj):
+ if self.is_oldstyle_instance(w_obj):
+ return (self.findattr(w_obj, self.wrap('__getitem__')) is not None)
+ flag = self.type(w_obj).flag_map_or_seq
+ if flag == 'M':
+ return True
+ elif flag == 'S':
+ return False
+ else:
+ return (self.lookup(w_obj, '__getitem__') is not None and
+ self.lookup(w_obj, '__getslice__') is None)
# The code below only works
# for the simple case (new-style instance).
@@ -1267,7 +1297,7 @@
def _next_or_none(self, w_it):
try:
return self.next(w_it)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
return None
@@ -1310,8 +1340,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
return start, stop, step
@@ -1331,8 +1360,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
length = 1
@@ -1345,7 +1373,7 @@
"""
try:
w_index = self.index(w_obj)
- except OperationError, err:
+ except OperationError as err:
if objdescr is None or not err.match(self, self.w_TypeError):
raise
raise oefmt(self.w_TypeError, "%s must be an integer, not %T",
@@ -1355,7 +1383,7 @@
# return type of __index__ is already checked by space.index(),
# but there is no reason to allow conversions anyway
index = self.int_w(w_index, allow_conversion=False)
- except OperationError, err:
+ except OperationError as err:
if not err.match(self, self.w_OverflowError):
raise
if not w_exception:
@@ -1376,20 +1404,17 @@
try:
return bigint.tolonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
def r_ulonglong_w(self, w_obj, allow_conversion=True):
bigint = self.bigint_w(w_obj, allow_conversion)
try:
return bigint.toulonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
except ValueError:
- raise OperationError(self.w_ValueError,
- self.wrap('cannot convert negative integer '
- 'to unsigned int'))
+ raise oefmt(self.w_ValueError,
+ "cannot convert negative integer to unsigned int")
BUF_SIMPLE = 0x0000
BUF_WRITABLE = 0x0001
@@ -1506,7 +1531,7 @@
# the unicode buffer.)
try:
return self.str_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_TypeError):
raise
try:
@@ -1535,8 +1560,8 @@
from rpython.rlib import rstring
result = w_obj.str_w(self)
if '\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a string without NUL characters")
return rstring.assert_str0(result)
def int_w(self, w_obj, allow_conversion=True):
@@ -1576,8 +1601,7 @@
def realstr_w(self, w_obj):
# Like str_w, but only works if w_obj is really of type 'str'.
if not self.isinstance_w(w_obj, self.w_str):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a string'))
+ raise oefmt(self.w_TypeError, "argument must be a string")
return self.str_w(w_obj)
def unicode_w(self, w_obj):
@@ -1588,16 +1612,16 @@
from rpython.rlib import rstring
result = w_obj.unicode_w(self)
if u'\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a unicode string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a unicode string without NUL "
+ "characters")
return rstring.assert_str0(result)
def realunicode_w(self, w_obj):
# Like unicode_w, but only works if w_obj is really of type
# 'unicode'.
if not self.isinstance_w(w_obj, self.w_unicode):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a unicode'))
+ raise oefmt(self.w_TypeError, "argument must be a unicode")
return self.unicode_w(w_obj)
def bool_w(self, w_obj):
@@ -1616,8 +1640,8 @@
def gateway_r_uint_w(self, w_obj):
if self.isinstance_w(w_obj, self.w_float):
- raise OperationError(self.w_TypeError,
- self.wrap("integer argument expected, got float"))
+ raise oefmt(self.w_TypeError,
+ "integer argument expected, got float")
return self.uint_w(self.int(w_obj))
def gateway_nonnegint_w(self, w_obj):
@@ -1625,8 +1649,7 @@
# the integer is negative. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
return value
def c_int_w(self, w_obj):
@@ -1634,8 +1657,7 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < INT_MIN or value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_uint_w(self, w_obj):
@@ -1643,8 +1665,8 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.uint_w(w_obj)
if value > UINT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected an unsigned 32-bit integer"))
+ raise oefmt(self.w_OverflowError,
+ "expected an unsigned 32-bit integer")
return value
def c_nonnegint_w(self, w_obj):
@@ -1653,11 +1675,9 @@
# for gateway.py.
value = self.int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
if value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_short_w(self, w_obj):
@@ -1685,7 +1705,7 @@
# instead of raising OverflowError. For obscure cases only.
try:
return self.int_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import intmask
@@ -1696,7 +1716,7 @@
# instead of raising OverflowError.
try:
return self.r_longlong_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import longlongmask
@@ -1711,22 +1731,20 @@
not self.isinstance_w(w_fd, self.w_long)):
try:
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_AttributeError):
- raise OperationError(self.w_TypeError,
- self.wrap("argument must be an int, or have a fileno() "
- "method.")
- )
+ raise oefmt(self.w_TypeError,
+ "argument must be an int, or have a fileno() "
+ "method.")
raise
w_fd = self.call_function(w_fileno)
if (not self.isinstance_w(w_fd, self.w_int) and
not self.isinstance_w(w_fd, self.w_long)):
- raise OperationError(self.w_TypeError,
- self.wrap("fileno() returned a non-integer")
- )
+ raise oefmt(self.w_TypeError,
+ "fileno() returned a non-integer")
try:
fd = self.c_int_w(w_fd)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_OverflowError):
fd = -1
else:
@@ -1838,7 +1856,6 @@
('get', 'get', 3, ['__get__']),
('set', 'set', 3, ['__set__']),
('delete', 'delete', 2, ['__delete__']),
- ('userdel', 'del', 1, ['__del__']),
]
ObjSpace.BuiltinModuleTable = [
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -214,9 +214,8 @@
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("instance exception may not "
- "have a separate value"))
+ raise oefmt(space.w_TypeError,
+ "instance exception may not have a separate value")
w_value = w_inst
w_type = w_instclass
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -2,7 +2,7 @@
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import specialize
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
TICK_COUNTER_STEP = 100
@@ -141,6 +141,12 @@
actionflag.action_dispatcher(self, frame) # slow path
bytecode_trace._always_inline_ = True
+ def _run_finalizers_now(self):
+ # Tests only: run the actions now, to ensure that the
+ # finalizable objects are really finalized. Used notably by
+ # pypy.tool.pytest.apptest.
+ self.space.actionflag.action_dispatcher(self, None)
+
def bytecode_only_trace(self, frame):
"""
Like bytecode_trace() but doesn't invoke any other events besides the
@@ -214,6 +220,7 @@
self._trace(frame, 'exception', None, operationerr)
#operationerr.print_detailed_traceback(self.space)
+ @jit.dont_look_inside
@specialize.arg(1)
def sys_exc_info(self, for_hidden=False):
"""Implements sys.exc_info().
@@ -225,15 +232,7 @@
# NOTE: the result is not the wrapped sys.exc_info() !!!
"""
- frame = self.gettopframe()
- while frame:
- if frame.last_exception is not None:
- if ((for_hidden or not frame.hide()) or
- frame.last_exception is
- get_cleared_operation_error(self.space)):
- return frame.last_exception
- frame = frame.f_backref()
- return None
+ return self.gettopframe()._exc_info_unroll(self.space, for_hidden)
def set_sys_exc_info(self, operror):
frame = self.gettopframe_nohidden()
@@ -467,6 +466,13 @@
list = self.fired_actions
if list is not None:
self.fired_actions = None
+ # NB. in case there are several actions, we reset each
+ # 'action._fired' to false only when we're about to call
+ # 'action.perform()'. This means that if
+ # 'action.fire()' happens to be called any time before
+ # the corresponding perform(), the fire() has no
+ # effect---which is the effect we want, because
+ # perform() will be called anyway.
for action in list:
action._fired = False
action.perform(ec, frame)
@@ -522,75 +528,98 @@
"""
-class UserDelCallback(object):
- def __init__(self, w_obj, callback, descrname):
- self.w_obj = w_obj
- self.callback = callback
- self.descrname = descrname
- self.next = None
-
class UserDelAction(AsyncAction):
"""An action that invokes all pending app-level __del__() method.
This is done as an action instead of immediately when the
- interp-level __del__() is invoked, because the latter can occur more
+ WRootFinalizerQueue is triggered, because the latter can occur more
or less anywhere in the middle of code that might not be happy with
random app-level code mutating data structures under its feet.
"""
def __init__(self, space):
AsyncAction.__init__(self, space)
- self.dying_objects = None
- self.dying_objects_last = None
- self.finalizers_lock_count = 0
- self.enabled_at_app_level = True
-
- def register_callback(self, w_obj, callback, descrname):
- cb = UserDelCallback(w_obj, callback, descrname)
- if self.dying_objects_last is None:
- self.dying_objects = cb
- else:
- self.dying_objects_last.next = cb
- self.dying_objects_last = cb
- self.fire()
+ self.finalizers_lock_count = 0 # see pypy/module/gc
+ self.enabled_at_app_level = True # see pypy/module/gc
+ self.pending_with_disabled_del = None
def perform(self, executioncontext, frame):
- if self.finalizers_lock_count > 0:
- return
self._run_finalizers()
+ @jit.dont_look_inside
def _run_finalizers(self):
- # Each call to perform() first grabs the self.dying_objects
- # and replaces it with an empty list. We do this to try to
- # avoid too deep recursions of the kind of __del__ being called
- # while in the middle of another __del__ call.
- pending = self.dying_objects
- self.dying_objects = None
- self.dying_objects_last = None
+ while True:
+ w_obj = self.space.finalizer_queue.next_dead()
+ if w_obj is None:
+ break
+ self._call_finalizer(w_obj)
+
+ def gc_disabled(self, w_obj):
+ # If we're running in 'gc.disable()' mode, record w_obj in the
+ # "call me later" list and return True. In normal mode, return
+ # False. Use this function from some _finalize_() methods:
+ # if a _finalize_() method would call some user-defined
+ # app-level function, like a weakref callback, then first do
+ # 'if gc.disabled(self): return'. Another attempt at
+ # calling _finalize_() will be made after 'gc.enable()'.
+ # (The exact rule for when to use gc_disabled() or not is a bit
+ # vague, but most importantly this includes all user-level
+ # __del__().)
From pypy.commits at gmail.com Wed May 11 03:34:33 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 00:34:33 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: test fix
Message-ID: <5732e089.41cec20a.180aa.3498@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84367:3d4ae3e9cc30
Date: 2016-05-11 09:34 +0200
http://bitbucket.org/pypy/pypy/changeset/3d4ae3e9cc30/
Log: test fix
diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
--- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
@@ -262,7 +262,7 @@
[i0]
i1 = int_add(i0, 1)
i2 = int_sub(i1, 10)
- i3 = int_floordiv(i2, 100)
+ i3 = int_xor(i2, 100)
i4 = int_mul(i1, 1000)
jump(i4)
"""
@@ -298,7 +298,7 @@
[i0]
i1 = int_add(i0, 1)
i2 = int_sub(i1, 10)
- i3 = int_floordiv(i2, 100)
+ i3 = int_xor(i2, 100)
i4 = int_mul(i1, 1000)
jump(i4)
"""
From pypy.commits at gmail.com Wed May 11 04:00:53 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 01:00:53 -0700 (PDT)
Subject: [pypy-commit] pypy default: Use the __builtin_add_overflow
built-ins if they are available:
Message-ID: <5732e6b5.821b1c0a.6ecc2.ffffdc61@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84368:e1b97a953f37
Date: 2016-05-11 10:01 +0200
http://bitbucket.org/pypy/pypy/changeset/e1b97a953f37/
Log: Use the __builtin_add_overflow built-ins if they are available: on
GCC >= 5, and on recent enough clang.
diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h
--- a/rpython/translator/c/src/int.h
+++ b/rpython/translator/c/src/int.h
@@ -53,7 +53,21 @@
/* addition, subtraction */
#define OP_INT_ADD(x,y,r) r = (x) + (y)
+#define OP_INT_SUB(x,y,r) r = (x) - (y)
+#define OP_INT_MUL(x,y,r) r = (x) * (y)
+
+#ifdef __GNUC__
+# if __GNUC__ >= 5
+# define HAVE_BUILTIN_OVERFLOW
+# elif defined(__has_builtin) /* clang */
+# if __has_builtin(__builtin_mul_overflow)
+# define HAVE_BUILTIN_OVERFLOW
+# endif
+# endif
+#endif
+
+#ifndef HAVE_BUILTIN_OVERFLOW
/* cast to avoid undefined behaviour on overflow */
#define OP_INT_ADD_OVF(x,y,r) \
r = (Signed)((Unsigned)x + y); \
@@ -63,14 +77,10 @@
r = (Signed)((Unsigned)x + y); \
if ((r&~x) < 0) FAIL_OVF("integer addition")
-#define OP_INT_SUB(x,y,r) r = (x) - (y)
-
#define OP_INT_SUB_OVF(x,y,r) \
r = (Signed)((Unsigned)x - y); \
if ((r^x) < 0 && (r^~y) < 0) FAIL_OVF("integer subtraction")
-#define OP_INT_MUL(x,y,r) r = (x) * (y)
-
#if SIZEOF_LONG * 2 <= SIZEOF_LONG_LONG && !defined(_WIN64)
#define OP_INT_MUL_OVF(x,y,r) \
{ \
@@ -83,6 +93,17 @@
r = op_llong_mul_ovf(x, y) /* long == long long */
#endif
+#else /* HAVE_BUILTIN_OVERFLOW */
+#define OP_INT_ADD_NONNEG_OVF(x,y,r) OP_INT_ADD_OVF(x,y,r)
+#define OP_INT_ADD_OVF(x,y,r) \
+ if (__builtin_add_overflow(x, y, &r)) FAIL_OVF("integer addition")
+#define OP_INT_SUB_OVF(x,y,r) \
+ if (__builtin_sub_overflow(x, y, &r)) FAIL_OVF("integer subtraction")
+#define OP_INT_MUL_OVF(x,y,r) \
+ if (__builtin_mul_overflow(x, y, &r)) FAIL_OVF("integer multiplication")
+#endif
+
+
/* shifting */
/* NB. shifting has same limitations as C: the shift count must be
From pypy.commits at gmail.com Wed May 11 04:01:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 01:01:39 -0700 (PDT)
Subject: [pypy-commit] pypy remove-raisingops: hg merge default
Message-ID: <5732e6e3.c9b0c20a.a1e25.3d2c@mx.google.com>
Author: Armin Rigo
Branch: remove-raisingops
Changeset: r84369:7652a3938aaa
Date: 2016-05-11 10:02 +0200
http://bitbucket.org/pypy/pypy/changeset/7652a3938aaa/
Log: hg merge default
diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py
--- a/pypy/module/_winreg/interp_winreg.py
+++ b/pypy/module/_winreg/interp_winreg.py
@@ -14,10 +14,11 @@
space.wrap(message)]))
class W_HKEY(W_Root):
- def __init__(self, hkey):
+ def __init__(self, space, hkey):
self.hkey = hkey
+ self.register_finalizer(space)
- def descr_del(self, space):
+ def _finalize_(self, space):
self.Close(space)
def as_int(self):
@@ -64,7 +65,7 @@
@unwrap_spec(key=int)
def new_HKEY(space, w_subtype, key):
hkey = rffi.cast(rwinreg.HKEY, key)
- return space.wrap(W_HKEY(hkey))
+ return space.wrap(W_HKEY(space, hkey))
descr_HKEY_new = interp2app(new_HKEY)
W_HKEY.typedef = TypeDef(
@@ -91,7 +92,6 @@
__int__ - Converting a handle to an integer returns the Win32 handle.
__cmp__ - Handle objects are compared using the handle value.""",
__new__ = descr_HKEY_new,
- __del__ = interp2app(W_HKEY.descr_del),
__repr__ = interp2app(W_HKEY.descr_repr),
__int__ = interp2app(W_HKEY.descr_int),
__nonzero__ = interp2app(W_HKEY.descr_nonzero),
@@ -480,7 +480,7 @@
ret = rwinreg.RegCreateKey(hkey, subkey, rethkey)
if ret != 0:
raiseWindowsError(space, ret, 'CreateKey')
- return space.wrap(W_HKEY(rethkey[0]))
+ return space.wrap(W_HKEY(space, rethkey[0]))
@unwrap_spec(subkey=str, res=int, sam=rffi.r_uint)
def CreateKeyEx(space, w_hkey, subkey, res=0, sam=rwinreg.KEY_WRITE):
@@ -502,7 +502,7 @@
lltype.nullptr(rwin32.LPDWORD.TO))
if ret != 0:
raiseWindowsError(space, ret, 'CreateKeyEx')
- return space.wrap(W_HKEY(rethkey[0]))
+ return space.wrap(W_HKEY(space, rethkey[0]))
@unwrap_spec(subkey=str)
def DeleteKey(space, w_hkey, subkey):
@@ -549,7 +549,7 @@
ret = rwinreg.RegOpenKeyEx(hkey, subkey, res, sam, rethkey)
if ret != 0:
raiseWindowsError(space, ret, 'RegOpenKeyEx')
- return space.wrap(W_HKEY(rethkey[0]))
+ return space.wrap(W_HKEY(space, rethkey[0]))
@unwrap_spec(index=int)
def EnumValue(space, w_hkey, index):
@@ -688,7 +688,7 @@
ret = rwinreg.RegConnectRegistry(machine, hkey, rethkey)
if ret != 0:
raiseWindowsError(space, ret, 'RegConnectRegistry')
- return space.wrap(W_HKEY(rethkey[0]))
+ return space.wrap(W_HKEY(space, rethkey[0]))
@unwrap_spec(source=unicode)
def ExpandEnvironmentStrings(space, source):
diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h
--- a/rpython/translator/c/src/int.h
+++ b/rpython/translator/c/src/int.h
@@ -45,7 +45,21 @@
/* addition, subtraction */
#define OP_INT_ADD(x,y,r) r = (x) + (y)
+#define OP_INT_SUB(x,y,r) r = (x) - (y)
+#define OP_INT_MUL(x,y,r) r = (x) * (y)
+
+#ifdef __GNUC__
+# if __GNUC__ >= 5
+# define HAVE_BUILTIN_OVERFLOW
+# elif defined(__has_builtin) /* clang */
+# if __has_builtin(__builtin_mul_overflow)
+# define HAVE_BUILTIN_OVERFLOW
+# endif
+# endif
+#endif
+
+#ifndef HAVE_BUILTIN_OVERFLOW
/* cast to avoid undefined behaviour on overflow */
#define OP_INT_ADD_OVF(x,y,r) \
r = (Signed)((Unsigned)x + y); \
@@ -55,14 +69,10 @@
r = (Signed)((Unsigned)x + y); \
if ((r&~x) < 0) FAIL_OVF("integer addition")
-#define OP_INT_SUB(x,y,r) r = (x) - (y)
-
#define OP_INT_SUB_OVF(x,y,r) \
r = (Signed)((Unsigned)x - y); \
if ((r^x) < 0 && (r^~y) < 0) FAIL_OVF("integer subtraction")
-#define OP_INT_MUL(x,y,r) r = (x) * (y)
-
#if SIZEOF_LONG * 2 <= SIZEOF_LONG_LONG
#define OP_INT_MUL_OVF(x,y,r) \
{ \
@@ -75,6 +85,17 @@
r = op_llong_mul_ovf(x, y) /* long == long long */
#endif
+#else /* HAVE_BUILTIN_OVERFLOW */
+#define OP_INT_ADD_NONNEG_OVF(x,y,r) OP_INT_ADD_OVF(x,y,r)
+#define OP_INT_ADD_OVF(x,y,r) \
+ if (__builtin_add_overflow(x, y, &r)) FAIL_OVF("integer addition")
+#define OP_INT_SUB_OVF(x,y,r) \
+ if (__builtin_sub_overflow(x, y, &r)) FAIL_OVF("integer subtraction")
+#define OP_INT_MUL_OVF(x,y,r) \
+ if (__builtin_mul_overflow(x, y, &r)) FAIL_OVF("integer multiplication")
+#endif
+
+
/* shifting */
/* NB. shifting has same limitations as C: the shift count must be
From pypy.commits at gmail.com Wed May 11 05:31:25 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 02:31:25 -0700 (PDT)
Subject: [pypy-commit] pypy ufunc-outer: close branch to be merged
Message-ID: <5732fbed.41c8c20a.1d2c5.65dd@mx.google.com>
Author: Matti Picus
Branch: ufunc-outer
Changeset: r84370:a3a74d141f2a
Date: 2016-05-11 12:28 +0300
http://bitbucket.org/pypy/pypy/changeset/a3a74d141f2a/
Log: close branch to be merged
From pypy.commits at gmail.com Wed May 11 05:31:27 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 02:31:27 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge ufunc-outer which implements
numpypy.ufunc.outer
Message-ID: <5732fbef.c9b0c20a.a1e25.629b@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84371:0c3ac8d64955
Date: 2016-05-11 12:29 +0300
http://bitbucket.org/pypy/pypy/changeset/0c3ac8d64955/
Log: merge ufunc-outer which implements numpypy.ufunc.outer
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -443,7 +443,7 @@
'array does not have imaginary part to set')
self.implementation.set_imag(space, self, w_value)
- def reshape(self, space, w_shape, order):
+ def reshape(self, space, w_shape, order=NPY.ANYORDER):
new_shape = get_shape_from_iterable(space, self.get_size(), w_shape)
new_impl = self.implementation.reshape(self, new_shape, order)
if new_impl is not None:
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -1480,7 +1480,21 @@
def test_outer(self):
import numpy as np
- from numpy import absolute
+ c = np.multiply.outer([1, 2, 3], [4, 5, 6])
+ assert c.shape == (3, 3)
+ assert (c ==[[ 4, 5, 6],
+ [ 8, 10, 12],
+ [12, 15, 18]]).all()
+ A = np.array([[1, 2, 3], [4, 5, 6]])
+ B = np.array([[1, 2, 3, 4]])
+ c = np.multiply.outer(A, B)
+ assert c.shape == (2, 3, 1, 4)
+ assert (c == [[[[ 1, 2, 3, 4]],
+ [[ 2, 4, 6, 8]],
+ [[ 3, 6, 9, 12]]],
+ [[[ 4, 8, 12, 16]],
+ [[ 5, 10, 15, 20]],
+ [[ 6, 12, 18, 24]]]]).all()
exc = raises(ValueError, np.absolute.outer, [-1, -2])
assert exc.value[0] == 'outer product only supported for binary functions'
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -363,12 +363,18 @@
out = space.call_method(obj, '__array_wrap__', out, space.w_None)
return out
- def descr_outer(self, space, __args__):
- return self._outer(space, __args__)
-
- def _outer(self, space, __args__):
- raise oefmt(space.w_ValueError,
+ def descr_outer(self, space, args_w):
+ if self.nin != 2:
+ raise oefmt(space.w_ValueError,
"outer product only supported for binary functions")
+ if len(args_w) != 2:
+ raise oefmt(space.w_ValueError,
+ "exactly two arguments expected")
+ args = [convert_to_array(space, w_obj) for w_obj in args_w]
+ w_outshape = [space.wrap(i) for i in args[0].get_shape() + [1]*args[1].ndims()]
+ args0 = args[0].reshape(space, space.newtuple(w_outshape))
+ return self.descr_call(space, Arguments.frompacked(space,
+ space.newlist([args0, args[1]])))
def parse_kwargs(self, space, kwds_w):
w_casting = kwds_w.pop('casting', None)
From pypy.commits at gmail.com Wed May 11 05:31:28 2016
From: pypy.commits at gmail.com (mattip)
Date: Wed, 11 May 2016 02:31:28 -0700 (PDT)
Subject: [pypy-commit] pypy default: document merged branch
Message-ID: <5732fbf0.109a1c0a.326c.ffff8237@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r84372:7d054c29c040
Date: 2016-05-11 12:30 +0300
http://bitbucket.org/pypy/pypy/changeset/7d054c29c040/
Log: document merged branch
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -89,3 +89,7 @@
Use the new rgc.FinalizerQueue mechanism to clean up the handling of
``__del__`` methods. Fixes notably issue #2287. (All RPython
subclasses of W_Root need to use FinalizerQueue now.)
+
+.. branch: ufunc-outer
+
+Implement ufunc.outer on numpypy
From pypy.commits at gmail.com Wed May 11 09:26:51 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 06:26:51 -0700 (PDT)
Subject: [pypy-commit] pypy default: A minimal test for algo.regalloc,
independent of the tests from jit.codewriter
Message-ID: <5733331b.10691c0a.62ac.0694@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r84373:71480708fb61
Date: 2016-05-11 15:27 +0200
http://bitbucket.org/pypy/pypy/changeset/71480708fb61/
Log: A minimal test for algo.regalloc, independent of the tests from
jit.codewriter
diff --git a/rpython/tool/algo/test/test_regalloc.py b/rpython/tool/algo/test/test_regalloc.py
new file mode 100644
--- /dev/null
+++ b/rpython/tool/algo/test/test_regalloc.py
@@ -0,0 +1,60 @@
+from rpython.rtyper.test.test_llinterp import gengraph
+from rpython.rtyper.lltypesystem import lltype
+from rpython.tool.algo.regalloc import perform_register_allocation
+from rpython.flowspace.model import Variable
+from rpython.conftest import option
+
+
+def is_int(v):
+ return v.concretetype == lltype.Signed
+
+def check_valid(graph, regalloc, consider_var):
+ if getattr(option, 'view', False):
+ graph.show()
+ num_renamings = 0
+ for block in graph.iterblocks():
+ inputs = [v for v in block.inputargs if consider_var(v)]
+ colors = [regalloc.getcolor(v) for v in inputs]
+ print inputs, ':', colors
+ assert len(inputs) == len(set(colors))
+ in_use = dict(zip(colors, inputs))
+ for op in block.operations:
+ for v in op.args:
+ if isinstance(v, Variable) and consider_var(v):
+ assert in_use[regalloc.getcolor(v)] is v
+ if consider_var(op.result):
+ in_use[regalloc.getcolor(op.result)] = op.result
+ for link in block.exits:
+ for i, v in enumerate(link.args):
+ if consider_var(v):
+ assert in_use[regalloc.getcolor(v)] is v
+ w = link.target.inputargs[i]
+ if regalloc.getcolor(v) is not regalloc.getcolor(w):
+ print '\trenaming %s:%d -> %s:%d' % (
+ v, regalloc.getcolor(v), w, regalloc.getcolor(w))
+ num_renamings += 1
+ return num_renamings
+
+
+def test_loop_1():
+ def f(a, b):
+ while a > 0:
+ b += a
+ a -= 1
+ return b
+ t, rtyper, graph = gengraph(f, [int, int], viewbefore=False)
+ regalloc = perform_register_allocation(graph, is_int)
+ num_renamings = check_valid(graph, regalloc, is_int)
+ assert num_renamings == 0
+
+def test_loop_2():
+ def f(a, b):
+ while a > 0:
+ b += a
+ if b < 10:
+ a, b = b, a
+ a -= 1
+ return b
+ t, rtyper, graph = gengraph(f, [int, int], viewbefore=False)
+ regalloc = perform_register_allocation(graph, is_int)
+ check_valid(graph, regalloc, is_int)
From pypy.commits at gmail.com Wed May 11 10:15:37 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 07:15:37 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: Yet another attempt at
improving shadowstack
Message-ID: <57333e89.49961c0a.8f7ae.00df@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84374:8db5e75d15b2
Date: 2016-05-11 15:48 +0200
http://bitbucket.org/pypy/pypy/changeset/8db5e75d15b2/
Log: Yet another attempt at improving shadowstack
From pypy.commits at gmail.com Wed May 11 10:15:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 07:15:39 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: in-progress: starting with
tests
Message-ID: <57333e8b.0c2e1c0a.bc1ee.042c@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84375:76c1df47dbe3
Date: 2016-05-11 16:15 +0200
http://bitbucket.org/pypy/pypy/changeset/76c1df47dbe3/
Log: in-progress: starting with tests
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -609,6 +609,9 @@
"the custom trace hook %r for %r can cause "
"the GC to be called!" % (func, TP))
+ def postprocess_graph(self, graph):
+ self.root_walker.postprocess_graph(self, graph)
+
def consider_constant(self, TYPE, value):
self.layoutbuilder.consider_constant(TYPE, value, self.gcdata.gc)
diff --git a/rpython/memory/gctransform/shadowcolor.py b/rpython/memory/gctransform/shadowcolor.py
new file mode 100644
--- /dev/null
+++ b/rpython/memory/gctransform/shadowcolor.py
@@ -0,0 +1,35 @@
+
+
+def find_interesting_variables(graph):
+ # Decide which variables are "interesting" or not. Interesting
+ # variables contain at least the ones that appear in gc_push_roots
+ # and gc_pop_roots.
+ pending = []
+ interesting_vars = set()
+ for block in graph.iterblocks():
+ for op in block.operations:
+ if op.opname == 'gc_push_roots':
+ for v in op.args:
+ interesting_vars.add(v)
+ pending.append((block, v))
+ elif op.opname == 'gc_pop_roots':
+ for v in op.args:
+ assert v in interesting_vars # must be pushed just above
+ if not interesting_vars:
+ return
+
+ # If there is a path from a gc_pop_roots(v) to a subsequent
+ # gc_push_roots(w) where w contains the same value as v along that
+ # path, then we consider all intermediate blocks along that path
+ # which contain a copy of the same value, and add these variables
+ # as "interesting", too.
+
+ #....
+ return interesting_vars
+
+
+def postprocess_graph(gct, graph):
+ """Collect information about the gc_push_roots and gc_pop_roots
+ added in this complete graph, and replace them with real operations.
+ """
+ xxxx
diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py
--- a/rpython/memory/gctransform/shadowstack.py
+++ b/rpython/memory/gctransform/shadowstack.py
@@ -31,28 +31,13 @@
self.num_pushs += len(livevars)
if not livevars:
return []
- c_len = rmodel.inputconst(lltype.Signed, len(livevars) )
- base_addr = hop.genop("direct_call", [self.incr_stack_ptr, c_len ],
- resulttype=llmemory.Address)
- for k,var in enumerate(livevars):
- c_k = rmodel.inputconst(lltype.Signed, k * sizeofaddr)
- v_adr = gen_cast(hop.llops, llmemory.Address, var)
- hop.genop("raw_store", [base_addr, c_k, v_adr])
+ hop.genop("gc_push_roots", livevars)
return livevars
def pop_roots(self, hop, livevars):
if not livevars:
return
- c_len = rmodel.inputconst(lltype.Signed, len(livevars) )
- base_addr = hop.genop("direct_call", [self.decr_stack_ptr, c_len ],
- resulttype=llmemory.Address)
- if self.gcdata.gc.moving_gc:
- # for moving collectors, reload the roots into the local variables
- for k,var in enumerate(livevars):
- c_k = rmodel.inputconst(lltype.Signed, k * sizeofaddr)
- v_newaddr = hop.genop("raw_load", [base_addr, c_k],
- resulttype=llmemory.Address)
- hop.genop("gc_reload_possibly_moved", [v_newaddr, var])
+ hop.genop("gc_pop_roots", livevars)
class ShadowStackRootWalker(BaseRootWalker):
@@ -222,6 +207,10 @@
from rpython.rlib import _stacklet_shadowstack
_stacklet_shadowstack.complete_destrptr(gctransformer)
+ def postprocess_graph(self, gct, graph):
+ from rpython.memory.gctransform import shadowcolor
+ shadowcolor.postprocess_graph(gct, graph)
+
# ____________________________________________________________
class ShadowStackPool(object):
diff --git a/rpython/memory/gctransform/test/test_shadowcolor.py b/rpython/memory/gctransform/test/test_shadowcolor.py
new file mode 100644
--- /dev/null
+++ b/rpython/memory/gctransform/test/test_shadowcolor.py
@@ -0,0 +1,55 @@
+from rpython.rtyper.lltypesystem import lltype, llmemory
+from rpython.rtyper.lltypesystem.lloperation import llop
+from rpython.rtyper.test.test_llinterp import gengraph
+from rpython.conftest import option
+from rpython.memory.gctransform.shadowcolor import find_interesting_variables
+
+
+def make_graph(f, argtypes):
+ t, rtyper, graph = gengraph(f, argtypes, viewbefore=False)
+ if getattr(option, 'view', False):
+ graph.show()
+ return graph
+
+def summary(interesting_vars):
+ result = {}
+ for v in interesting_vars:
+ name = v._name.rstrip('_')
+ result[name] = result.get(name, 0) + 1
+ return result
+
+
+def test_interesting_vars_0():
+ def f(a, b):
+ pass
+ graph = make_graph(f, [llmemory.GCREF, int])
+ assert not find_interesting_variables(graph)
+
+def test_interesting_vars_1():
+ def f(a, b):
+ llop.gc_push_roots(lltype.Void, a)
+ llop.gc_pop_roots(lltype.Void, a)
+ graph = make_graph(f, [llmemory.GCREF, int])
+ assert summary(find_interesting_variables(graph)) == {'a': 1}
+
+def test_interesting_vars_2():
+ def f(a, b, c):
+ llop.gc_push_roots(lltype.Void, a)
+ llop.gc_pop_roots(lltype.Void, a)
+ while b > 0:
+ b -= 5
+ llop.gc_push_roots(lltype.Void, c)
+ llop.gc_pop_roots(lltype.Void, c)
+ graph = make_graph(f, [llmemory.GCREF, int, llmemory.GCREF])
+ assert summary(find_interesting_variables(graph)) == {'a': 1, 'c': 1}
+
+def test_interesting_vars_3():
+ def f(a, b):
+ llop.gc_push_roots(lltype.Void, a)
+ llop.gc_pop_roots(lltype.Void, a)
+ while b > 0: # 'a' remains interesting across the blocks of this loop
+ b -= 5
+ llop.gc_push_roots(lltype.Void, a)
+ llop.gc_pop_roots(lltype.Void, a)
+ graph = make_graph(f, [llmemory.GCREF, int])
+ assert summary(find_interesting_variables(graph)) == {'a': 4}
diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py
--- a/rpython/memory/gctransform/transform.py
+++ b/rpython/memory/gctransform/transform.py
@@ -236,6 +236,8 @@
else:
insert_empty_block(link, llops)
+ self.postprocess_graph(graph)
+
# remove the empty block at the start of the graph, which should
# still be empty (but let's check)
if starts_with_empty_block(graph) and inserted_empty_startblock:
@@ -252,6 +254,9 @@
graph.exc_cleanup = (v, list(llops))
return is_borrowed # xxx for tests only
+ def postprocess_graph(self, graph):
+ pass
+
def annotate_helper(self, ll_helper, ll_args, ll_result, inline=False):
assert not self.finished_helpers
args_s = map(lltype_to_annotation, ll_args)
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -513,6 +513,9 @@
'gc_rawrefcount_from_obj': LLOp(sideeffects=False),
'gc_rawrefcount_to_obj': LLOp(sideeffects=False),
+ 'gc_push_roots' : LLOp(),
+ 'gc_pop_roots' : LLOp(),
+
# ------- JIT & GC interaction, only for some GCs ----------
'gc_adr_of_nursery_free' : LLOp(),
diff --git a/rpython/tool/algo/test/test_regalloc.py b/rpython/tool/algo/test/test_regalloc.py
--- a/rpython/tool/algo/test/test_regalloc.py
+++ b/rpython/tool/algo/test/test_regalloc.py
@@ -57,4 +57,5 @@
return b
t, rtyper, graph = gengraph(f, [int, int], viewbefore=False)
regalloc = perform_register_allocation(graph, is_int)
- check_valid(graph, regalloc, is_int)
+ num_renamings = check_valid(graph, regalloc, is_int)
+ assert num_renamings == 2
From pypy.commits at gmail.com Wed May 11 10:47:00 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 07:47:00 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: tests, code
Message-ID: <573345e4.8455c20a.4f164.ffffeb51@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84376:142d9abed9dd
Date: 2016-05-11 16:47 +0200
http://bitbucket.org/pypy/pypy/changeset/142d9abed9dd/
Log: tests, code
diff --git a/rpython/memory/gctransform/shadowcolor.py b/rpython/memory/gctransform/shadowcolor.py
--- a/rpython/memory/gctransform/shadowcolor.py
+++ b/rpython/memory/gctransform/shadowcolor.py
@@ -1,28 +1,68 @@
+from rpython.flowspace.model import mkentrymap, Variable
+
+
+def is_trivial_rewrite(op):
+ return op.opname in ('same_as', 'cast_pointer', 'cast_opaque_ptr')
+
+
+def find_precessors(graph, pending_pred):
+ """Return the set of variables whose content can end up inside one
+ of the 'pending_pred', which is a list of (block, var) tuples.
+ """
+ entrymap = mkentrymap(graph)
+ pred = set([v for block, v in pending_pred])
+
+ def add(block, v):
+ if isinstance(v, Variable):
+ if v not in pred:
+ pending_pred.append((block, v))
+ pred.add(v)
+
+ while pending_pred:
+ block, v = pending_pred.pop()
+ if v in block.inputargs:
+ var_index = block.inputargs.index(v)
+ for link in entrymap[block]:
+ prevblock = link.prevblock
+ if prevblock is not None:
+ add(prevblock, link.args[var_index])
+ else:
+ for op in block.operations:
+ if op.result is v:
+ if is_trivial_rewrite(op):
+ add(block, op.args[0])
+ break
+ return pred
def find_interesting_variables(graph):
# Decide which variables are "interesting" or not. Interesting
# variables contain at least the ones that appear in gc_push_roots
# and gc_pop_roots.
- pending = []
+ pending_pred = []
+ pending_succ = []
interesting_vars = set()
for block in graph.iterblocks():
for op in block.operations:
if op.opname == 'gc_push_roots':
for v in op.args:
interesting_vars.add(v)
- pending.append((block, v))
+ pending_pred.append((block, v))
elif op.opname == 'gc_pop_roots':
for v in op.args:
assert v in interesting_vars # must be pushed just above
- if not interesting_vars:
- return
+ pending_succ.append((block, v))
# If there is a path from a gc_pop_roots(v) to a subsequent
# gc_push_roots(w) where w contains the same value as v along that
# path, then we consider all intermediate blocks along that path
# which contain a copy of the same value, and add these variables
- # as "interesting", too.
+ # as "interesting", too. Formally, a variable in a block is
+ # "interesting" if it is both a "predecessor" and a "successor",
+ # where predecessors are variables which (sometimes) end in a
+ # gc_push_roots, and successors are variables which (sometimes)
+ # come from a gc_pop_roots.
+
#....
return interesting_vars
diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py
--- a/rpython/memory/gctransform/shadowstack.py
+++ b/rpython/memory/gctransform/shadowstack.py
@@ -29,15 +29,11 @@
def push_roots(self, hop, keep_current_args=False):
livevars = self.get_livevars_for_roots(hop, keep_current_args)
self.num_pushs += len(livevars)
- if not livevars:
- return []
- hop.genop("gc_push_roots", livevars)
+ hop.genop("gc_push_roots", livevars) # even if len(livevars) == 0
return livevars
def pop_roots(self, hop, livevars):
- if not livevars:
- return
- hop.genop("gc_pop_roots", livevars)
+ hop.genop("gc_pop_roots", livevars) # even if len(livevars) == 0
class ShadowStackRootWalker(BaseRootWalker):
diff --git a/rpython/memory/gctransform/test/test_shadowcolor.py b/rpython/memory/gctransform/test/test_shadowcolor.py
--- a/rpython/memory/gctransform/test/test_shadowcolor.py
+++ b/rpython/memory/gctransform/test/test_shadowcolor.py
@@ -2,7 +2,7 @@
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.test.test_llinterp import gengraph
from rpython.conftest import option
-from rpython.memory.gctransform.shadowcolor import find_interesting_variables
+from rpython.memory.gctransform.shadowcolor import *
def make_graph(f, argtypes):
@@ -19,6 +19,53 @@
return result
+def test_find_predecessors_1():
+ def f(a, b):
+ c = a + b
+ return c
+ graph = make_graph(f, [int, int])
+ pred = find_precessors(graph, [(graph.returnblock, graph.getreturnvar())])
+ assert summary(pred) == {'c': 1, 'v': 1}
+
+def test_find_predecessors_2():
+ def f(a, b):
+ c = a + b
+ while a > 0:
+ a -= 2
+ return c
+ graph = make_graph(f, [int, int])
+ pred = find_precessors(graph, [(graph.returnblock, graph.getreturnvar())])
+ assert summary(pred) == {'c': 3, 'v': 1}
+
+def test_find_predecessors_3():
+ def f(a, b):
+ while b > 100:
+ b -= 2
+ if b > 10:
+ c = a + b # 'c' created in this block
+ else:
+ c = a - b # 'c' created in this block
+ return c # 'v' is the return var
+ graph = make_graph(f, [int, int])
+ pred = find_precessors(graph, [(graph.returnblock, graph.getreturnvar())])
+ assert summary(pred) == {'c': 2, 'v': 1}
+
+def test_find_predecessors_4():
+ def f(a, b): # 'a' in the input block
+ while b > 100: # 'a' in the loop header block
+ b -= 2 # 'a' in the loop body block
+ if b > 10: # 'a' in the condition block
+ while b > 5: # nothing
+ b -= 2 # nothing
+ c = a + b # 'c' created in this block
+ else:
+ c = a
+ return c # 'v' is the return var
+ graph = make_graph(f, [int, int])
+ pred = find_precessors(graph, [(graph.returnblock, graph.getreturnvar())])
+ assert summary(pred) == {'a': 4, 'c': 1, 'v': 1}
+
+
def test_interesting_vars_0():
def f(a, b):
pass
From pypy.commits at gmail.com Wed May 11 10:57:02 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 07:57:02 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: find_successors()
Message-ID: <5733483e.230ec20a.d38cd.ffffe1ff@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84377:1fbf31cc03dc
Date: 2016-05-11 16:55 +0200
http://bitbucket.org/pypy/pypy/changeset/1fbf31cc03dc/
Log: find_successors()
diff --git a/rpython/memory/gctransform/shadowcolor.py b/rpython/memory/gctransform/shadowcolor.py
--- a/rpython/memory/gctransform/shadowcolor.py
+++ b/rpython/memory/gctransform/shadowcolor.py
@@ -35,6 +35,30 @@
return pred
+def find_successors(graph, pending_succ):
+ """Return the set of variables where one of the 'pending_succ' can
+ end up. 'block_succ' is a list of (block, var) tuples.
+ """
+ succ = set([v for block, v in pending_succ])
+
+ def add(block, v):
+ if isinstance(v, Variable):
+ if v not in succ:
+ pending_succ.append((block, v))
+ succ.add(v)
+
+ while pending_succ:
+ block, v = pending_succ.pop()
+ for op in block.operations:
+ if op.args and v is op.args[0] and is_trivial_rewrite(op):
+ add(block, op.result)
+ for link in block.exits:
+ for i, v1 in enumerate(link.args):
+ if v1 is v:
+ add(link.target, link.target.inputargs[i])
+ return succ
+
+
def find_interesting_variables(graph):
# Decide which variables are "interesting" or not. Interesting
# variables contain at least the ones that appear in gc_push_roots
diff --git a/rpython/memory/gctransform/test/test_shadowcolor.py b/rpython/memory/gctransform/test/test_shadowcolor.py
--- a/rpython/memory/gctransform/test/test_shadowcolor.py
+++ b/rpython/memory/gctransform/test/test_shadowcolor.py
@@ -65,6 +65,36 @@
pred = find_precessors(graph, [(graph.returnblock, graph.getreturnvar())])
assert summary(pred) == {'a': 4, 'c': 1, 'v': 1}
+def test_find_successors_1():
+ def f(a, b):
+ return a + b
+ graph = make_graph(f, [int, int])
+ succ = find_successors(graph, [(graph.startblock, graph.getargs()[0])])
+ assert summary(succ) == {'a': 1}
+
+def test_find_successors_2():
+ def f(a, b):
+ if b > 10:
+ return a + b
+ else:
+ return a - b
+ graph = make_graph(f, [int, int])
+ succ = find_successors(graph, [(graph.startblock, graph.getargs()[0])])
+ assert summary(succ) == {'a': 3}
+
+def test_find_successors_3():
+ def f(a, b):
+ if b > 10: # 'a' condition block
+ a = a + b # 'a' input
+ while b > 100:
+ b -= 2
+ while b > 5: # 'a' in loop header
+ b -= 2 # 'a' in loop body
+ return a * b # 'a' in product
+ graph = make_graph(f, [int, int])
+ succ = find_successors(graph, [(graph.startblock, graph.getargs()[0])])
+ assert summary(succ) == {'a': 5}
+
def test_interesting_vars_0():
def f(a, b):
From pypy.commits at gmail.com Wed May 11 10:57:04 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 07:57:04 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: Pass test_interesting_vars_3.
Message-ID: <57334840.2457c20a.74bca.fffff889@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84378:be524ccde2f4
Date: 2016-05-11 16:57 +0200
http://bitbucket.org/pypy/pypy/changeset/be524ccde2f4/
Log: Pass test_interesting_vars_3.
diff --git a/rpython/memory/gctransform/shadowcolor.py b/rpython/memory/gctransform/shadowcolor.py
--- a/rpython/memory/gctransform/shadowcolor.py
+++ b/rpython/memory/gctransform/shadowcolor.py
@@ -5,7 +5,7 @@
return op.opname in ('same_as', 'cast_pointer', 'cast_opaque_ptr')
-def find_precessors(graph, pending_pred):
+def find_predecessors(graph, pending_pred):
"""Return the set of variables whose content can end up inside one
of the 'pending_pred', which is a list of (block, var) tuples.
"""
@@ -86,9 +86,10 @@
# where predecessors are variables which (sometimes) end in a
# gc_push_roots, and successors are variables which (sometimes)
# come from a gc_pop_roots.
+ pred = find_predecessors(graph, pending_pred)
+ succ = find_successors(graph, pending_succ)
+ interesting_vars |= (pred & succ)
-
- #....
return interesting_vars
diff --git a/rpython/memory/gctransform/test/test_shadowcolor.py b/rpython/memory/gctransform/test/test_shadowcolor.py
--- a/rpython/memory/gctransform/test/test_shadowcolor.py
+++ b/rpython/memory/gctransform/test/test_shadowcolor.py
@@ -24,7 +24,7 @@
c = a + b
return c
graph = make_graph(f, [int, int])
- pred = find_precessors(graph, [(graph.returnblock, graph.getreturnvar())])
+ pred = find_predecessors(graph, [(graph.returnblock, graph.getreturnvar())])
assert summary(pred) == {'c': 1, 'v': 1}
def test_find_predecessors_2():
@@ -34,7 +34,7 @@
a -= 2
return c
graph = make_graph(f, [int, int])
- pred = find_precessors(graph, [(graph.returnblock, graph.getreturnvar())])
+ pred = find_predecessors(graph, [(graph.returnblock, graph.getreturnvar())])
assert summary(pred) == {'c': 3, 'v': 1}
def test_find_predecessors_3():
@@ -47,7 +47,7 @@
c = a - b # 'c' created in this block
return c # 'v' is the return var
graph = make_graph(f, [int, int])
- pred = find_precessors(graph, [(graph.returnblock, graph.getreturnvar())])
+ pred = find_predecessors(graph, [(graph.returnblock, graph.getreturnvar())])
assert summary(pred) == {'c': 2, 'v': 1}
def test_find_predecessors_4():
@@ -62,7 +62,7 @@
c = a
return c # 'v' is the return var
graph = make_graph(f, [int, int])
- pred = find_precessors(graph, [(graph.returnblock, graph.getreturnvar())])
+ pred = find_predecessors(graph, [(graph.returnblock, graph.getreturnvar())])
assert summary(pred) == {'a': 4, 'c': 1, 'v': 1}
def test_find_successors_1():
From pypy.commits at gmail.com Wed May 11 11:03:57 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 08:03:57 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: Test that
is_trivial_rewrite() is correctly used
Message-ID: <573349dd.161b1c0a.70e6d.1552@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84379:e0900fa2a687
Date: 2016-05-11 17:04 +0200
http://bitbucket.org/pypy/pypy/changeset/e0900fa2a687/
Log: Test that is_trivial_rewrite() is correctly used
diff --git a/rpython/memory/gctransform/test/test_shadowcolor.py b/rpython/memory/gctransform/test/test_shadowcolor.py
--- a/rpython/memory/gctransform/test/test_shadowcolor.py
+++ b/rpython/memory/gctransform/test/test_shadowcolor.py
@@ -65,6 +65,19 @@
pred = find_predecessors(graph, [(graph.returnblock, graph.getreturnvar())])
assert summary(pred) == {'a': 4, 'c': 1, 'v': 1}
+def test_find_predecessors_trivial_rewrite():
+ def f(a, b): # 'b' in empty startblock
+ while a > 100: # 'b'
+ a -= 2 # 'b'
+ c = llop.same_as(lltype.Signed, b) # 'c', 'b'
+ while b > 10: # 'c'
+ b -= 2 # 'c'
+ d = llop.same_as(lltype.Signed, c) # 'd', 'c'
+ return d # 'v' is the return var
+ graph = make_graph(f, [int, int])
+ pred = find_predecessors(graph, [(graph.returnblock, graph.getreturnvar())])
+ assert summary(pred) == {'b': 4, 'c': 4, 'd': 1, 'v': 1}
+
def test_find_successors_1():
def f(a, b):
return a + b
@@ -95,6 +108,19 @@
succ = find_successors(graph, [(graph.startblock, graph.getargs()[0])])
assert summary(succ) == {'a': 5}
+def test_find_successors_trivial_rewrite():
+ def f(a, b): # 'b' in empty startblock
+ while a > 100: # 'b'
+ a -= 2 # 'b'
+ c = llop.same_as(lltype.Signed, b) # 'c', 'b'
+ while b > 10: # 'c', 'b'
+ b -= 2 # 'c', 'b'
+ d = llop.same_as(lltype.Signed, c) # 'd', 'c'
+ return d # 'v' is the return var
+ graph = make_graph(f, [int, int])
+ succ = find_successors(graph, [(graph.startblock, graph.getargs()[1])])
+ assert summary(succ) == {'b': 6, 'c': 4, 'd': 1, 'v': 1}
+
def test_interesting_vars_0():
def f(a, b):
From pypy.commits at gmail.com Wed May 11 11:23:08 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 08:23:08 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: many more tests for three
extra lines of code
Message-ID: <57334e5c.0f801c0a.8f688.3eda@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84380:9584b8299e11
Date: 2016-05-11 17:23 +0200
http://bitbucket.org/pypy/pypy/changeset/9584b8299e11/
Log: many more tests for three extra lines of code
diff --git a/rpython/memory/gctransform/shadowcolor.py b/rpython/memory/gctransform/shadowcolor.py
--- a/rpython/memory/gctransform/shadowcolor.py
+++ b/rpython/memory/gctransform/shadowcolor.py
@@ -1,4 +1,5 @@
from rpython.flowspace.model import mkentrymap, Variable
+from rpython.tool.algo.regalloc import perform_register_allocation
def is_trivial_rewrite(op):
@@ -93,6 +94,12 @@
return interesting_vars
+def allocate_registers(graph):
+ interesting_vars = find_interesting_variables(graph)
+ regalloc = perform_register_allocation(graph, interesting_vars.__contains__)
+ return regalloc
+
+
def postprocess_graph(gct, graph):
"""Collect information about the gc_push_roots and gc_pop_roots
added in this complete graph, and replace them with real operations.
diff --git a/rpython/memory/gctransform/test/test_shadowcolor.py b/rpython/memory/gctransform/test/test_shadowcolor.py
--- a/rpython/memory/gctransform/test/test_shadowcolor.py
+++ b/rpython/memory/gctransform/test/test_shadowcolor.py
@@ -11,13 +11,30 @@
graph.show()
return graph
+def nameof(v):
+ return v._name.rstrip('_')
+
def summary(interesting_vars):
result = {}
for v in interesting_vars:
- name = v._name.rstrip('_')
+ name = nameof(v)
result[name] = result.get(name, 0) + 1
return result
+def summary_regalloc(regalloc):
+ result = []
+ for block in regalloc.graph.iterblocks():
+ print block.inputargs
+ for op in block.operations:
+ print '\t', op
+ blockvars = block.inputargs + [op.result for op in block.operations]
+ for v in blockvars:
+ if regalloc.consider_var(v):
+ result.append((nameof(v), regalloc.getcolor(v)))
+ print '\t\t%s: %s' % (v, regalloc.getcolor(v))
+ result.sort()
+ return result
+
def test_find_predecessors_1():
def f(a, b):
@@ -156,3 +173,72 @@
llop.gc_pop_roots(lltype.Void, a)
graph = make_graph(f, [llmemory.GCREF, int])
assert summary(find_interesting_variables(graph)) == {'a': 4}
+
+def test_allocate_registers_1():
+ def f(a, b):
+ llop.gc_push_roots(lltype.Void, a)
+ llop.gc_pop_roots(lltype.Void, a)
+ while b > 0: # 'a' remains interesting across the blocks of this loop
+ b -= 5
+ llop.gc_push_roots(lltype.Void, a)
+ llop.gc_pop_roots(lltype.Void, a)
+ graph = make_graph(f, [llmemory.GCREF, int])
+ regalloc = allocate_registers(graph)
+ assert summary_regalloc(regalloc) == [('a', 0)] * 4
+
+def test_allocate_registers_2():
+ def f(a, b, c):
+ llop.gc_push_roots(lltype.Void, a)
+ llop.gc_pop_roots(lltype.Void, a)
+ while b > 0:
+ b -= 5
+ llop.gc_push_roots(lltype.Void, c)
+ llop.gc_pop_roots(lltype.Void, c)
+ graph = make_graph(f, [llmemory.GCREF, int, llmemory.GCREF])
+ regalloc = allocate_registers(graph)
+ assert summary_regalloc(regalloc) == [('a', 0), ('c', 0)]
+
+def test_allocate_registers_3():
+ def f(a, b, c):
+ llop.gc_push_roots(lltype.Void, c, a)
+ llop.gc_pop_roots(lltype.Void, c, a)
+ while b > 0:
+ b -= 5
+ llop.gc_push_roots(lltype.Void, a)
+ llop.gc_pop_roots(lltype.Void, a)
+ graph = make_graph(f, [llmemory.GCREF, int, llmemory.GCREF])
+ regalloc = allocate_registers(graph)
+ assert summary_regalloc(regalloc) == [('a', 1)] * 4 + [('c', 0)]
+
+def test_allocate_registers_4():
+ def g(a, x):
+ return x # (or something different)
+ def f(a, b, c):
+ llop.gc_push_roots(lltype.Void, a, c) # 'a', 'c'
+ llop.gc_pop_roots(lltype.Void, a, c)
+ while b > 0: # 'a' only; 'c' not in push_roots
+ b -= 5
+ llop.gc_push_roots(lltype.Void, a)# 'a'
+ d = g(a, c)
+ llop.gc_pop_roots(lltype.Void, a)
+ c = d
+ return c
+ graph = make_graph(f, [llmemory.GCREF, int, llmemory.GCREF])
+ regalloc = allocate_registers(graph)
+ assert summary_regalloc(regalloc) == [('a', 1)] * 3 + [('c', 0)]
+
+def test_allocate_registers_5():
+ def g(a, x):
+ return x # (or something different)
+ def f(a, b, c):
+ while b > 0: # 'a', 'c'
+ b -= 5
+ llop.gc_push_roots(lltype.Void, a, c) # 'a', 'c'
+ g(a, c)
+ llop.gc_pop_roots(lltype.Void, a, c)
+ while b < 10:
+ b += 2
+ return c
+ graph = make_graph(f, [llmemory.GCREF, int, llmemory.GCREF])
+ regalloc = allocate_registers(graph)
+ assert summary_regalloc(regalloc) == [('a', 1)] * 2 + [('c', 0)] * 2
diff --git a/rpython/tool/algo/regalloc.py b/rpython/tool/algo/regalloc.py
--- a/rpython/tool/algo/regalloc.py
+++ b/rpython/tool/algo/regalloc.py
@@ -94,8 +94,8 @@
self._try_coalesce(v, link.target.inputargs[i])
def _try_coalesce(self, v, w):
- if isinstance(v, Variable) and self.consider_var(v):
- assert self.consider_var(w)
+ if isinstance(v, Variable) and self.consider_var(v) \
+ and self.consider_var(w):
dg = self._depgraph
uf = self._unionfind
v0 = uf.find_rep(v)
From pypy.commits at gmail.com Wed May 11 12:18:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 09:18:39 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: in-progress
Message-ID: <57335b5f.43ecc20a.69786.12c6@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84381:94fec9f874f1
Date: 2016-05-11 18:18 +0200
http://bitbucket.org/pypy/pypy/changeset/94fec9f874f1/
Log: in-progress
diff --git a/rpython/memory/gctransform/shadowcolor.py b/rpython/memory/gctransform/shadowcolor.py
--- a/rpython/memory/gctransform/shadowcolor.py
+++ b/rpython/memory/gctransform/shadowcolor.py
@@ -100,8 +100,35 @@
return regalloc
+def move_pushes_earlier(graph):
+ """gc_push_roots and gc_pop_roots are pushes/pops to the shadowstack,
+ immediately enclosing the operation that needs them (typically a call).
+ Here, we try to move individual pushes earlier, in fact as early as
+ possible under the following conditions: we only move it across vars
+ that are 'interesting_vars'; and we stop when we encounter the
+ operation that produces the value, or when we encounter a gc_pop_roots
+ that pops off the same stack location. In the latter case, if that
+ gc_pop_roots pops the same value out of the same stack location, then
+ success: we can remove the gc_push_root on that path.
+
+ If the process succeeds to remove the gc_push_root along at least
+ one path, we generate it explicitly on the other paths, and we
+ remove the original gc_push_root. If the process doesn't succeed
+ in doing any such removal, we don't do anything.
+
+ Note that it would be possible to do exactly the same in the
+ opposite direction by exchanging the roles of "push/earlier" and
+ "pop/later". I think doing both is pointless---one direction is
+ enough. The direction we chose here keeps gc_pop_roots unmodified.
+ The C compiler should be better at discarding them if unused.
+ """
+
+ x.x.x.x
+
+
def postprocess_graph(gct, graph):
"""Collect information about the gc_push_roots and gc_pop_roots
added in this complete graph, and replace them with real operations.
"""
+ regalloc = allocate_registers(graph)
xxxx
diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py
--- a/rpython/memory/gctransform/shadowstack.py
+++ b/rpython/memory/gctransform/shadowstack.py
@@ -33,7 +33,7 @@
return livevars
def pop_roots(self, hop, livevars):
- hop.genop("gc_pop_roots", livevars) # even if len(livevars) == 0
+ hop.genop("gc_pop_roots", list(livevars)) # even if len(livevars) == 0
class ShadowStackRootWalker(BaseRootWalker):
From pypy.commits at gmail.com Wed May 11 12:18:55 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Wed, 11 May 2016 09:18:55 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: merge default
Message-ID: <57335b6f.81da1c0a.338e7.347c@mx.google.com>
Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r84382:608b83492e8d
Date: 2016-05-11 18:17 +0200
http://bitbucket.org/pypy/pypy/changeset/608b83492e8d/
Log: merge default
diff too long, truncating to 2000 out of 54659 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,3 +20,5 @@
5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -111,23 +111,24 @@
Simon Burton
Martin Matusiak
Konstantin Lopuhin
+ Stefano Rivera
Wenzhu Man
John Witulski
Laurence Tratt
Ivan Sichmann Freitas
Greg Price
Dario Bertini
- Stefano Rivera
Mark Pearse
Simon Cross
+ Edd Barrett
Andreas Stührk
- Edd Barrett
Jean-Philippe St. Pierre
Guido van Rossum
Pavel Vinogradov
+ Spenser Bauman
Jeremy Thurgood
Paweł Piotr Przeradowski
- Spenser Bauman
+ Tobias Pape
Paul deGrandis
Ilya Osadchiy
marky1991
@@ -139,7 +140,7 @@
Georg Brandl
Bert Freudenberg
Stian Andreassen
- Tobias Pape
+ Mark Young
Wanja Saatkamp
Gerald Klix
Mike Blume
@@ -170,9 +171,9 @@
Yichao Yu
Rocco Moretti
Gintautas Miliauskas
+ Devin Jeanpierre
Michael Twomey
Lucian Branescu Mihaila
- Devin Jeanpierre
Gabriel Lavoie
Olivier Dormond
Jared Grubb
@@ -183,6 +184,7 @@
Victor Stinner
Andrews Medina
anatoly techtonik
+ Sergey Matyunin
Stuart Williams
Jasper Schulz
Christian Hudon
@@ -217,7 +219,6 @@
Arjun Naik
Valentina Mukhamedzhanova
Stefano Parmesan
- Mark Young
Alexis Daboville
Jens-Uwe Mager
Carl Meyer
@@ -225,7 +226,9 @@
Pieter Zieschang
Gabriel
Lukas Vacek
+ Kunal Grover
Andrew Dalke
+ Florin Papa
Sylvain Thenault
Jakub Stasiak
Nathan Taylor
@@ -240,7 +243,6 @@
Kristjan Valur Jonsson
David Lievens
Neil Blakey-Milner
- Sergey Matyunin
Lutz Paelike
Lucio Torre
Lars Wassermann
@@ -252,9 +254,11 @@
Artur Lisiecki
Sergey Kishchenko
Ignas Mikalajunas
+ Alecsandru Patrascu
Christoph Gerum
Martin Blais
Lene Wagner
+ Catalin Gabriel Manciu
Tomo Cocoa
Kim Jin Su
Toni Mattis
@@ -291,6 +295,7 @@
Akira Li
Gustavo Niemeyer
Stephan Busemann
+ florinpapa
Rafał Gałczyński
Matt Bogosian
Christian Muirhead
@@ -305,6 +310,7 @@
Boglarka Vezer
Chris Pressey
Buck Golemon
+ Diana Popa
Konrad Delong
Dinu Gherman
Chris Lambacher
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
if __name__ == '__main__':
if len(sys.argv) != 2:
+ if len(sys.argv) == 1:
+ # start locally
+ import sshgraphserver
+ sshgraphserver.ssh_graph_server(['LOCAL'])
+ sys.exit(0)
print >> sys.stderr, __doc__
sys.exit(2)
if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
Usage:
sshgraphserver.py hostname [more args for ssh...]
+ sshgraphserver.py LOCAL
This logs in to 'hostname' by passing the arguments on the command-line
to ssh. No further configuration is required: it works for all programs
using the dotviewer library as long as they run on 'hostname' under the
same username as the one sshgraphserver logs as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
"""
import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
s1 = socket.socket()
s1.bind(('127.0.0.1', socket.INADDR_ANY))
localhost, localport = s1.getsockname()
- remoteport = random.randrange(10000, 20000)
- # ^^^ and just hope there is no conflict
- args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
- args = args + sshargs + ['python -u -c "exec input()"']
- print ' '.join(args[:-1])
+ if sshargs[0] != 'LOCAL':
+ remoteport = random.randrange(10000, 20000)
+ # ^^^ and just hope there is no conflict
+
+ args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+ remoteport, localport)]
+ args = args + sshargs + ['python -u -c "exec input()"']
+ else:
+ remoteport = localport
+ args = ['python', '-u', '-c', 'exec input()']
+
+ print ' '.join(args)
p = subprocess.Popen(args, bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "next" : stop}),
- ("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
("__format__", format, format_impl, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
+ if test_support.check_impl_detail():
+ specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
raise MyException
for name, runner, meth_impl, ok, env in specials:
- if name == '__length_hint__' or name == '__sizeof__':
- if not test_support.check_impl_detail():
- continue
-
class X(Checker):
pass
for attr, obj in env.iteritems():
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
+ 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after ` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create a integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -320,8 +320,7 @@
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
- def __hash__(self):
- raise TypeError("deque objects are unhashable")
+ __hash__ = None
def __copy__(self):
return self.__class__(self, self.maxlen)
diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
--- a/lib_pypy/_ctypes/structure.py
+++ b/lib_pypy/_ctypes/structure.py
@@ -67,7 +67,8 @@
subvalue = subfield.ctype
fields[subname] = Field(subname,
relpos, subvalue._sizeofinstances(),
- subvalue, i, is_bitfield)
+ subvalue, i, is_bitfield,
+ inside_anon_field=fields[name])
else:
resnames.append(name)
names = resnames
@@ -77,13 +78,15 @@
class Field(object):
- def __init__(self, name, offset, size, ctype, num, is_bitfield):
+ def __init__(self, name, offset, size, ctype, num, is_bitfield,
+ inside_anon_field=None):
self.__dict__['name'] = name
self.__dict__['offset'] = offset
self.__dict__['size'] = size
self.__dict__['ctype'] = ctype
self.__dict__['num'] = num
self.__dict__['is_bitfield'] = is_bitfield
+ self.__dict__['inside_anon_field'] = inside_anon_field
def __setattr__(self, name, value):
raise AttributeError(name)
@@ -95,6 +98,8 @@
def __get__(self, obj, cls=None):
if obj is None:
return self
+ if self.inside_anon_field is not None:
+ return getattr(self.inside_anon_field.__get__(obj), self.name)
if self.is_bitfield:
# bitfield member, use direct access
return obj._buffer.__getattr__(self.name)
@@ -105,6 +110,9 @@
return fieldtype._CData_output(suba, obj, offset)
def __set__(self, obj, value):
+ if self.inside_anon_field is not None:
+ setattr(self.inside_anon_field.__get__(obj), self.name, value)
+ return
fieldtype = self.ctype
cobj = fieldtype.from_param(value)
key = keepalive_key(self.num)
diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py
--- a/lib_pypy/_pypy_wait.py
+++ b/lib_pypy/_pypy_wait.py
@@ -1,51 +1,22 @@
-from resource import _struct_rusage, struct_rusage
-from ctypes import CDLL, c_int, POINTER, byref
-from ctypes.util import find_library
+from resource import ffi, lib, _make_struct_rusage
__all__ = ["wait3", "wait4"]
-libc = CDLL(find_library("c"))
-c_wait3 = libc.wait3
-c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait3.restype = c_int
-
-c_wait4 = libc.wait4
-c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait4.restype = c_int
-
-def create_struct_rusage(c_struct):
- return struct_rusage((
- float(c_struct.ru_utime),
- float(c_struct.ru_stime),
- c_struct.ru_maxrss,
- c_struct.ru_ixrss,
- c_struct.ru_idrss,
- c_struct.ru_isrss,
- c_struct.ru_minflt,
- c_struct.ru_majflt,
- c_struct.ru_nswap,
- c_struct.ru_inblock,
- c_struct.ru_oublock,
- c_struct.ru_msgsnd,
- c_struct.ru_msgrcv,
- c_struct.ru_nsignals,
- c_struct.ru_nvcsw,
- c_struct.ru_nivcsw))
def wait3(options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait3(byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait3(status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
def wait4(pid, options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait4(pid, status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_resource_build.py
@@ -0,0 +1,118 @@
+from cffi import FFI
+
+ffi = FFI()
+
+# Note: we don't directly expose 'struct timeval' or 'struct rlimit'
+
+
+rlimit_consts = '''
+RLIMIT_CPU
+RLIMIT_FSIZE
+RLIMIT_DATA
+RLIMIT_STACK
+RLIMIT_CORE
+RLIMIT_NOFILE
+RLIMIT_OFILE
+RLIMIT_VMEM
+RLIMIT_AS
+RLIMIT_RSS
+RLIMIT_NPROC
+RLIMIT_MEMLOCK
+RLIMIT_SBSIZE
+RLIM_INFINITY
+RUSAGE_SELF
+RUSAGE_CHILDREN
+RUSAGE_BOTH
+'''.split()
+
+rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s)
+ for s in rlimit_consts]
+
+
+ffi.set_source("_resource_cffi", """
+#include
+#include
+#include
+#include
+
+static const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[] = {
+$RLIMIT_CONSTS
+ { NULL, 0 }
+};
+
+#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001)
+
+static double my_utime(struct rusage *input)
+{
+ return doubletime(input->ru_utime);
+}
+
+static double my_stime(struct rusage *input)
+{
+ return doubletime(input->ru_stime);
+}
+
+static int my_getrlimit(int resource, long long result[2])
+{
+ struct rlimit rl;
+ if (getrlimit(resource, &rl) == -1)
+ return -1;
+ result[0] = rl.rlim_cur;
+ result[1] = rl.rlim_max;
+ return 0;
+}
+
+static int my_setrlimit(int resource, long long cur, long long max)
+{
+ struct rlimit rl;
+ rl.rlim_cur = cur & RLIM_INFINITY;
+ rl.rlim_max = max & RLIM_INFINITY;
+ return setrlimit(resource, &rl);
+}
+
+""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts)))
+
+
+ffi.cdef("""
+
+#define RLIM_NLIMITS ...
+
+const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[];
+
+struct rusage {
+ long ru_maxrss;
+ long ru_ixrss;
+ long ru_idrss;
+ long ru_isrss;
+ long ru_minflt;
+ long ru_majflt;
+ long ru_nswap;
+ long ru_inblock;
+ long ru_oublock;
+ long ru_msgsnd;
+ long ru_msgrcv;
+ long ru_nsignals;
+ long ru_nvcsw;
+ long ru_nivcsw;
+ ...;
+};
+
+static double my_utime(struct rusage *);
+static double my_stime(struct rusage *);
+void getrusage(int who, struct rusage *result);
+int my_getrlimit(int resource, long long result[2]);
+int my_setrlimit(int resource, long long cur, long long max);
+
+int wait3(int *status, int options, struct rusage *rusage);
+int wait4(int pid, int *status, int options, struct rusage *rusage);
+""")
+
+
+if __name__ == "__main__":
+ ffi.compile()
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.5.2
+Version: 1.6.0
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.5.2"
-__version_info__ = (1, 5, 2)
+__version__ = "1.6.0"
+__version_info__ = (1, 6, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.5.2"
+ "\ncompiled with cffi version: 1.6.0"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -299,6 +299,23 @@
"""
return self._backend.string(cdata, maxlen)
+ def unpack(self, cdata, length):
+ """Unpack an array of C data of the given length,
+ returning a Python string/unicode/list.
+
+ If 'cdata' is a pointer to 'char', returns a byte string.
+ It does not stop at the first null. This is equivalent to:
+ ffi.buffer(cdata, length)[:]
+
+ If 'cdata' is a pointer to 'wchar_t', returns a unicode string.
+ 'length' is measured in wchar_t's; it is not the size in bytes.
+
+ If 'cdata' is a pointer to anything else, returns a list of
+ 'length' items. This is a faster equivalent to:
+ [cdata[i] for i in range(length)]
+ """
+ return self._backend.unpack(cdata, length)
+
def buffer(self, cdata, size=-1):
"""Return a read-write buffer object that references the raw C data
pointed to by the given 'cdata'. The 'cdata' must be a pointer or
@@ -380,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
@@ -721,6 +725,26 @@
raise ValueError("ffi.def_extern() is only available on API-mode FFI "
"objects")
+ def list_types(self):
+ """Returns the user type names known to this FFI instance.
+ This returns a tuple containing three lists of names:
+ (typedef_names, names_of_structs, names_of_unions)
+ """
+ typedefs = []
+ structs = []
+ unions = []
+ for key in self._parser._declarations:
+ if key.startswith('typedef '):
+ typedefs.append(key[8:])
+ elif key.startswith('struct '):
+ structs.append(key[7:])
+ elif key.startswith('union '):
+ unions.append(key[6:])
+ typedefs.sort()
+ structs.sort()
+ unions.sort()
+ return (typedefs, structs, unions)
+
def _load_backend_lib(backend, name, flags):
if name is None:
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
@@ -993,6 +998,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -29,7 +29,8 @@
_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
_r_cdecl = re.compile(r"\b__cdecl\b")
-_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')
+_r_extern_python = re.compile(r'\bextern\s*"'
+ r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
_r_star_const_space = re.compile( # matches "* const "
r"[*]\s*((const|volatile|restrict)\b\s*)+")
@@ -88,6 +89,12 @@
# void __cffi_extern_python_start;
# int foo(int);
# void __cffi_extern_python_stop;
+ #
+ # input: `extern "Python+C" int foo(int);`
+ # output:
+ # void __cffi_extern_python_plus_c_start;
+ # int foo(int);
+ # void __cffi_extern_python_stop;
parts = []
while True:
match = _r_extern_python.search(csource)
@@ -98,7 +105,10 @@
#print ''.join(parts)+csource
#print '=>'
parts.append(csource[:match.start()])
- parts.append('void __cffi_extern_python_start; ')
+ if 'C' in match.group(1):
+ parts.append('void __cffi_extern_python_plus_c_start; ')
+ else:
+ parts.append('void __cffi_extern_python_start; ')
if csource[endpos] == '{':
# grouping variant
closing = csource.find('}', endpos)
@@ -302,7 +312,7 @@
break
#
try:
- self._inside_extern_python = False
+ self._inside_extern_python = '__cffi_extern_python_stop'
for decl in iterator:
if isinstance(decl, pycparser.c_ast.Decl):
self._parse_decl(decl)
@@ -376,8 +386,10 @@
tp = self._get_type_pointer(tp, quals)
if self._options.get('dllexport'):
tag = 'dllexport_python '
- elif self._inside_extern_python:
+ elif self._inside_extern_python == '__cffi_extern_python_start':
tag = 'extern_python '
+ elif self._inside_extern_python == '__cffi_extern_python_plus_c_start':
+ tag = 'extern_python_plus_c '
else:
tag = 'function '
self._declare(tag + decl.name, tp)
@@ -421,11 +433,9 @@
# hack: `extern "Python"` in the C source is replaced
# with "void __cffi_extern_python_start;" and
# "void __cffi_extern_python_stop;"
- self._inside_extern_python = not self._inside_extern_python
- assert self._inside_extern_python == (
- decl.name == '__cffi_extern_python_start')
+ self._inside_extern_python = decl.name
else:
- if self._inside_extern_python:
+ if self._inside_extern_python !='__cffi_extern_python_stop':
raise api.CDefError(
"cannot declare constants or "
"variables with 'extern \"Python\"'")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -1145,11 +1145,11 @@
def _generate_cpy_extern_python_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
self._do_collect_type(tp)
+ _generate_cpy_dllexport_python_collecttype = \
+ _generate_cpy_extern_python_plus_c_collecttype = \
+ _generate_cpy_extern_python_collecttype
- def _generate_cpy_dllexport_python_collecttype(self, tp, name):
- self._generate_cpy_extern_python_collecttype(tp, name)
-
- def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False):
+ def _extern_python_decl(self, tp, name, tag_and_space):
prnt = self._prnt
if isinstance(tp.result, model.VoidType):
size_of_result = '0'
@@ -1184,11 +1184,7 @@
size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
tp.result.get_c_name(''), size_of_a,
tp.result.get_c_name(''), size_of_a)
- if dllexport:
- tag = 'CFFI_DLLEXPORT'
- else:
- tag = 'static'
- prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments)))
+ prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
prnt('{')
prnt(' char a[%s];' % size_of_a)
prnt(' char *p = a;')
@@ -1206,8 +1202,14 @@
prnt()
self._num_externpy += 1
+ def _generate_cpy_extern_python_decl(self, tp, name):
+ self._extern_python_decl(tp, name, 'static ')
+
def _generate_cpy_dllexport_python_decl(self, tp, name):
- self._generate_cpy_extern_python_decl(tp, name, dllexport=True)
+ self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
+
+ def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
+ self._extern_python_decl(tp, name, '')
def _generate_cpy_extern_python_ctx(self, tp, name):
if self.target_is_python:
@@ -1220,8 +1222,9 @@
self._lsts["global"].append(
GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
- def _generate_cpy_dllexport_python_ctx(self, tp, name):
- self._generate_cpy_extern_python_ctx(tp, name)
+ _generate_cpy_dllexport_python_ctx = \
+ _generate_cpy_extern_python_plus_c_ctx = \
+ _generate_cpy_extern_python_ctx
def _string_literal(self, s):
def _char_repr(c):
@@ -1231,7 +1234,7 @@
if c == '\n': return '\\n'
return '\\%03o' % ord(c)
lines = []
- for line in s.splitlines(True):
+ for line in s.splitlines(True) or ['']:
lines.append('"%s"' % ''.join([_char_repr(c) for c in line]))
return ' \\\n'.join(lines)
@@ -1319,7 +1322,9 @@
s = s.encode('ascii')
super(NativeIO, self).write(s)
-def _make_c_or_py_source(ffi, module_name, preamble, target_file):
+def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose):
+ if verbose:
+ print("generating %s" % (target_file,))
recompiler = Recompiler(ffi, module_name,
target_is_python=(preamble is None))
recompiler.collect_type_table()
@@ -1331,6 +1336,8 @@
with open(target_file, 'r') as f1:
if f1.read(len(output) + 1) != output:
raise IOError
+ if verbose:
+ print("(already up-to-date)")
return False # already up-to-date
except IOError:
tmp_file = '%s.~%d' % (target_file, os.getpid())
@@ -1343,12 +1350,14 @@
os.rename(tmp_file, target_file)
return True
-def make_c_source(ffi, module_name, preamble, target_c_file):
+def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False):
assert preamble is not None
- return _make_c_or_py_source(ffi, module_name, preamble, target_c_file)
+ return _make_c_or_py_source(ffi, module_name, preamble, target_c_file,
+ verbose)
-def make_py_source(ffi, module_name, target_py_file):
- return _make_c_or_py_source(ffi, module_name, None, target_py_file)
+def make_py_source(ffi, module_name, target_py_file, verbose=False):
+ return _make_c_or_py_source(ffi, module_name, None, target_py_file,
+ verbose)
def _modname_to_file(outputdir, modname, extension):
parts = modname.split('.')
@@ -1438,7 +1447,8 @@
target = '*'
#
ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds)
- updated = make_c_source(ffi, module_name, preamble, c_file)
+ updated = make_c_source(ffi, module_name, preamble, c_file,
+ verbose=compiler_verbose)
if call_c_compiler:
patchlist = []
cwd = os.getcwd()
@@ -1458,7 +1468,8 @@
else:
if c_file is None:
c_file, _ = _modname_to_file(tmpdir, module_name, '.py')
- updated = make_py_source(ffi, module_name, c_file)
+ updated = make_py_source(ffi, module_name, c_file,
+ verbose=compiler_verbose)
if call_c_compiler:
return c_file
else:
@@ -1484,4 +1495,7 @@
def typeof_disabled(*args, **kwds):
raise NotImplementedError
ffi._typeof = typeof_disabled
+ for name in dir(ffi):
+ if not name.startswith('_') and not hasattr(module.ffi, name):
+ setattr(ffi, name, NotImplemented)
return module.lib
diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty
new file mode 100644
--- /dev/null
+++ b/lib_pypy/ctypes_config_cache/.empty
@@ -0,0 +1,1 @@
+dummy file to allow old buildbot configuration to run
diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py
deleted file mode 100644
diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/dumpcache.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import sys, os
-from ctypes_configure import dumpcache
-
-def dumpcache2(basename, config):
- size = 32 if sys.maxint <= 2**32 else 64
- filename = '_%s_%s_.py' % (basename, size)
- dumpcache.dumpcache(__file__, filename, config)
- #
- filename = os.path.join(os.path.dirname(__file__),
- '_%s_cache.py' % (basename,))
- g = open(filename, 'w')
- print >> g, '''\
-import sys
-_size = 32 if sys.maxint <= 2**32 else 64
-# XXX relative import, should be removed together with
-# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib
-_mod = __import__("_%s_%%s_" %% (_size,),
- globals(), locals(), ["*"])
-globals().update(_mod.__dict__)\
-''' % (basename,)
- g.close()
diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/locale.ctc.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-'ctypes_configure' source for _locale.py.
-Run this to rebuild _locale_cache.py.
-"""
-
-from ctypes_configure.configure import (configure, ExternalCompilationInfo,
- ConstantInteger, DefinedConstantInteger, SimpleType, check_eci)
-import dumpcache
-
-# ____________________________________________________________
-
-_CONSTANTS = [
- 'LC_CTYPE',
- 'LC_TIME',
- 'LC_COLLATE',
- 'LC_MONETARY',
- 'LC_MESSAGES',
- 'LC_NUMERIC',
- 'LC_ALL',
- 'CHAR_MAX',
-]
-
-class LocaleConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['limits.h',
- 'locale.h'])
-for key in _CONSTANTS:
- setattr(LocaleConfigure, key, DefinedConstantInteger(key))
-
-config = configure(LocaleConfigure, noerr=True)
-for key, value in config.items():
- if value is None:
- del config[key]
- _CONSTANTS.remove(key)
-
-# ____________________________________________________________
-
-eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h'])
-HAS_LANGINFO = check_eci(eci)
-
-if HAS_LANGINFO:
- # list of all possible names
- langinfo_names = [
- "RADIXCHAR", "THOUSEP", "CRNCYSTR",
- "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR",
- "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT",
- "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT",
- ]
- for i in range(1, 8):
- langinfo_names.append("DAY_%d" % i)
- langinfo_names.append("ABDAY_%d" % i)
- for i in range(1, 13):
- langinfo_names.append("MON_%d" % i)
- langinfo_names.append("ABMON_%d" % i)
-
- class LanginfoConfigure:
- _compilation_info_ = eci
- nl_item = SimpleType('nl_item')
- for key in langinfo_names:
- setattr(LanginfoConfigure, key, DefinedConstantInteger(key))
-
- langinfo_config = configure(LanginfoConfigure)
- for key, value in langinfo_config.items():
- if value is None:
- del langinfo_config[key]
- langinfo_names.remove(key)
- config.update(langinfo_config)
- _CONSTANTS += langinfo_names
-
-# ____________________________________________________________
-
-config['ALL_CONSTANTS'] = tuple(_CONSTANTS)
-config['HAS_LANGINFO'] = HAS_LANGINFO
-dumpcache.dumpcache2('locale', config)
diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py
deleted file mode 100755
--- a/lib_pypy/ctypes_config_cache/rebuild.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env python
-# Run this script to rebuild all caches from the *.ctc.py files.
-
-import os, sys
-
-sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))
-
-import py
-
-_dirpath = os.path.dirname(__file__) or os.curdir
-
-from rpython.tool.ansi_print import AnsiLogger
-log = AnsiLogger("ctypes_config_cache")
-
-
-def rebuild_one(name):
- filename = os.path.join(_dirpath, name)
- d = {'__file__': filename}
- path = sys.path[:]
- try:
- sys.path.insert(0, _dirpath)
- execfile(filename, d)
- finally:
- sys.path[:] = path
-
-def try_rebuild():
- size = 32 if sys.maxint <= 2**32 else 64
- # remove the files '_*_size_.py'
- left = {}
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_%s_.py' % size) or
- p.endswith('_%s_.pyc' % size)):
- os.unlink(os.path.join(_dirpath, p))
- elif p.startswith('_') and (p.endswith('_.py') or
- p.endswith('_.pyc')):
- for i in range(2, len(p)-4):
- left[p[:i]] = True
- # remove the files '_*_cache.py' if there is no '_*_*_.py' left around
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_cache.py') or
- p.endswith('_cache.pyc')):
- if p[:-9] not in left:
- os.unlink(os.path.join(_dirpath, p))
- #
- for p in os.listdir(_dirpath):
- if p.endswith('.ctc.py'):
- try:
- rebuild_one(p)
- except Exception, e:
- log.ERROR("Running %s:\n %s: %s" % (
- os.path.join(_dirpath, p),
- e.__class__.__name__, e))
-
-
-if __name__ == '__main__':
- try_rebuild()
diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/resource.ctc.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-'ctypes_configure' source for resource.py.
-Run this to rebuild _resource_cache.py.
-"""
-
-
-from ctypes import sizeof
-import dumpcache
-from ctypes_configure.configure import (configure,
- ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger,
- SimpleType)
-
-
-_CONSTANTS = (
- 'RLIM_INFINITY',
- 'RLIM_NLIMITS',
-)
-_OPTIONAL_CONSTANTS = (
- 'RLIMIT_CPU',
- 'RLIMIT_FSIZE',
- 'RLIMIT_DATA',
- 'RLIMIT_STACK',
- 'RLIMIT_CORE',
- 'RLIMIT_RSS',
- 'RLIMIT_NPROC',
- 'RLIMIT_NOFILE',
- 'RLIMIT_OFILE',
- 'RLIMIT_MEMLOCK',
- 'RLIMIT_AS',
- 'RLIMIT_LOCKS',
- 'RLIMIT_SIGPENDING',
- 'RLIMIT_MSGQUEUE',
- 'RLIMIT_NICE',
- 'RLIMIT_RTPRIO',
- 'RLIMIT_VMEM',
-
- 'RUSAGE_BOTH',
- 'RUSAGE_SELF',
- 'RUSAGE_CHILDREN',
-)
-
-# Setup our configure
-class ResourceConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h'])
- rlim_t = SimpleType('rlim_t')
-for key in _CONSTANTS:
- setattr(ResourceConfigure, key, ConstantInteger(key))
-for key in _OPTIONAL_CONSTANTS:
- setattr(ResourceConfigure, key, DefinedConstantInteger(key))
-
-# Configure constants and types
-config = configure(ResourceConfigure)
-config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1
-optional_constants = []
-for key in _OPTIONAL_CONSTANTS:
- if config[key] is not None:
- optional_constants.append(key)
- else:
- del config[key]
-
-config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants)
-dumpcache.dumpcache2('resource', config)
diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py
--- a/lib_pypy/pwd.py
+++ b/lib_pypy/pwd.py
@@ -1,4 +1,4 @@
-# ctypes implementation: Victor Stinner, 2008-05-08
+# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08
"""
This module provides access to the Unix password database.
It is available on all Unix versions.
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -1,15 +1,8 @@
-import sys
-if sys.platform == 'win32':
- raise ImportError('resource module not available for win32')
+"""http://docs.python.org/library/resource"""
-# load the platform-specific cache made by running resource.ctc.py
-from ctypes_config_cache._resource_cache import *
-
-from ctypes_support import standard_c_lib as libc
-from ctypes_support import get_errno
-from ctypes import Structure, c_int, c_long, byref, POINTER
+from _resource_cffi import ffi, lib
from errno import EINVAL, EPERM
-import _structseq
+import _structseq, os
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
@@ -18,106 +11,37 @@
class error(Exception):
pass
+class struct_rusage:
+ """struct_rusage: Result from getrusage.
-# Read required libc functions
-_getrusage = libc.getrusage
-_getrlimit = libc.getrlimit
-_setrlimit = libc.setrlimit
-try:
- _getpagesize = libc.getpagesize
- _getpagesize.argtypes = ()
- _getpagesize.restype = c_int
-except AttributeError:
- from os import sysconf
- _getpagesize = None
+This object may be accessed either as a tuple of
+ (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt,
+ nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw)
+or via the attributes ru_utime, ru_stime, ru_maxrss, and so on."""
-
-class timeval(Structure):
- _fields_ = (
- ("tv_sec", c_long),
- ("tv_usec", c_long),
- )
- def __str__(self):
- return "(%s, %s)" % (self.tv_sec, self.tv_usec)
-
- def __float__(self):
- return self.tv_sec + self.tv_usec/1000000.0
-
-class _struct_rusage(Structure):
- _fields_ = (
- ("ru_utime", timeval),
- ("ru_stime", timeval),
- ("ru_maxrss", c_long),
- ("ru_ixrss", c_long),
- ("ru_idrss", c_long),
- ("ru_isrss", c_long),
- ("ru_minflt", c_long),
- ("ru_majflt", c_long),
- ("ru_nswap", c_long),
- ("ru_inblock", c_long),
- ("ru_oublock", c_long),
- ("ru_msgsnd", c_long),
- ("ru_msgrcv", c_long),
- ("ru_nsignals", c_long),
- ("ru_nvcsw", c_long),
- ("ru_nivcsw", c_long),
- )
-
-_getrusage.argtypes = (c_int, POINTER(_struct_rusage))
-_getrusage.restype = c_int
-
-
-class struct_rusage:
__metaclass__ = _structseq.structseqtype
- ru_utime = _structseq.structseqfield(0)
- ru_stime = _structseq.structseqfield(1)
- ru_maxrss = _structseq.structseqfield(2)
- ru_ixrss = _structseq.structseqfield(3)
- ru_idrss = _structseq.structseqfield(4)
- ru_isrss = _structseq.structseqfield(5)
- ru_minflt = _structseq.structseqfield(6)
- ru_majflt = _structseq.structseqfield(7)
- ru_nswap = _structseq.structseqfield(8)
- ru_inblock = _structseq.structseqfield(9)
- ru_oublock = _structseq.structseqfield(10)
- ru_msgsnd = _structseq.structseqfield(11)
- ru_msgrcv = _structseq.structseqfield(12)
- ru_nsignals = _structseq.structseqfield(13)
- ru_nvcsw = _structseq.structseqfield(14)
- ru_nivcsw = _structseq.structseqfield(15)
+ ru_utime = _structseq.structseqfield(0, "user time used")
+ ru_stime = _structseq.structseqfield(1, "system time used")
+ ru_maxrss = _structseq.structseqfield(2, "max. resident set size")
+ ru_ixrss = _structseq.structseqfield(3, "shared memory size")
+ ru_idrss = _structseq.structseqfield(4, "unshared data size")
+ ru_isrss = _structseq.structseqfield(5, "unshared stack size")
+ ru_minflt = _structseq.structseqfield(6, "page faults not requiring I/O")
+ ru_majflt = _structseq.structseqfield(7, "page faults requiring I/O")
+ ru_nswap = _structseq.structseqfield(8, "number of swap outs")
+ ru_inblock = _structseq.structseqfield(9, "block input operations")
+ ru_oublock = _structseq.structseqfield(10, "block output operations")
+ ru_msgsnd = _structseq.structseqfield(11, "IPC messages sent")
+ ru_msgrcv = _structseq.structseqfield(12, "IPC messages received")
+ ru_nsignals = _structseq.structseqfield(13,"signals received")
+ ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches")
+ ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches")
- at builtinify
-def rlimit_check_bounds(rlim_cur, rlim_max):
- if rlim_cur > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_cur)
- if rlim_max > rlim_t_max:
- raise ValueError("%d does not fit into rlim_t" % rlim_max)
-
-class rlimit(Structure):
- _fields_ = (
- ("rlim_cur", rlim_t),
- ("rlim_max", rlim_t),
- )
-
-_getrlimit.argtypes = (c_int, POINTER(rlimit))
-_getrlimit.restype = c_int
-_setrlimit.argtypes = (c_int, POINTER(rlimit))
-_setrlimit.restype = c_int
-
-
- at builtinify
-def getrusage(who):
- ru = _struct_rusage()
- ret = _getrusage(who, byref(ru))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- raise ValueError("invalid who parameter")
- raise error(errno)
+def _make_struct_rusage(ru):
return struct_rusage((
- float(ru.ru_utime),
- float(ru.ru_stime),
+ lib.my_utime(ru),
+ lib.my_stime(ru),
ru.ru_maxrss,
ru.ru_ixrss,
ru.ru_idrss,
@@ -135,48 +59,59 @@
))
@builtinify
+def getrusage(who):
+ ru = ffi.new("struct rusage *")
+ if lib.getrusage(who, ru) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("invalid who parameter")
+ raise error(ffi.errno)
+ return _make_struct_rusage(ru)
+
+ at builtinify
def getrlimit(resource):
- if not(0 <= resource < RLIM_NLIMITS):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlim = rlimit()
- ret = _getrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- raise error(errno)
- return (rlim.rlim_cur, rlim.rlim_max)
+ result = ffi.new("long long[2]")
+ if lib.my_getrlimit(resource, result) == -1:
+ raise error(ffi.errno)
+ return (result[0], result[1])
@builtinify
-def setrlimit(resource, rlim):
- if not(0 <= resource < RLIM_NLIMITS):
+def setrlimit(resource, limits):
+ if not (0 <= resource < lib.RLIM_NLIMITS):
return ValueError("invalid resource specified")
- rlimit_check_bounds(*rlim)
- rlim = rlimit(rlim[0], rlim[1])
- ret = _setrlimit(resource, byref(rlim))
- if ret == -1:
- errno = get_errno()
- if errno == EINVAL:
- return ValueError("current limit exceeds maximum limit")
- elif errno == EPERM:
- return ValueError("not allowed to raise maximum limit")
+ limits = tuple(limits)
+ if len(limits) != 2:
+ raise ValueError("expected a tuple of 2 integers")
+
+ if lib.my_setrlimit(resource, limits[0], limits[1]) == -1:
+ if ffi.errno == EINVAL:
+ raise ValueError("current limit exceeds maximum limit")
+ elif ffi.errno == EPERM:
+ raise ValueError("not allowed to raise maximum limit")
else:
- raise error(errno)
+ raise error(ffi.errno)
+
@builtinify
def getpagesize():
- if _getpagesize:
- return _getpagesize()
- else:
- try:
- return sysconf("SC_PAGE_SIZE")
- except ValueError:
- # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE
- return sysconf("SC_PAGESIZE")
+ return os.sysconf("SC_PAGESIZE")
-__all__ = ALL_CONSTANTS + (
- 'error', 'timeval', 'struct_rusage', 'rlimit',
- 'getrusage', 'getrlimit', 'setrlimit', 'getpagesize',
+
+def _setup():
+ all_constants = []
+ p = lib.my_rlimit_consts
+ while p.name:
+ name = ffi.string(p.name)
+ globals()[name] = int(p.value)
+ all_constants.append(name)
+ p += 1
+ return all_constants
+
+__all__ = tuple(_setup()) + (
+ 'error', 'getpagesize', 'struct_rusage',
+ 'getrusage', 'getrlimit', 'setrlimit',
)
-
-del ALL_CONSTANTS
+del _setup
diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py
--- a/lib_pypy/syslog.py
+++ b/lib_pypy/syslog.py
@@ -51,6 +51,8 @@
# if log is not opened, open it now
if not _S_log_open:
openlog()
+ if isinstance(message, unicode):
+ message = str(message)
lib.syslog(priority, "%s", message)
@builtinify
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -46,7 +46,6 @@
except detect_cpu.ProcessorAutodetectError:
pass
-
translation_modules = default_modules.copy()
translation_modules.update([
"fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5",
@@ -205,15 +204,6 @@
BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
default=False),
- BoolOption("withprebuiltchar",
- "use prebuilt single-character string objects",
- default=False),
-
- BoolOption("sharesmallstr",
- "always reuse the prebuilt string objects "
- "(the empty string and potentially single-char strings)",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
@@ -223,39 +213,14 @@
default=False,
requires=[("objspace.honor__builtins__", False)]),
- BoolOption("withmapdict",
- "make instances really small but slow without the JIT",
- default=False,
- requires=[("objspace.std.getattributeshortcut", True),
- ("objspace.std.withmethodcache", True),
- ]),
-
- BoolOption("withrangelist",
- "enable special range list implementation that does not "
- "actually create the full list until the resulting "
- "list is mutated",
- default=False),
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
- BoolOption("withtypeversion",
- "version type objects when changing them",
- cmdline=None,
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
-
- BoolOption("withmethodcache",
- "try to cache method lookups",
- default=False,
- requires=[("objspace.std.withtypeversion", True),
- ("translation.rweakref", True)]),
BoolOption("withmethodcachecounter",
"try to cache methods and provide a counter in __pypy__. "
"for testing purposes only.",
- default=False,
- requires=[("objspace.std.withmethodcache", True)]),
+ default=False),
IntOption("methodcachesizeexp",
" 2 ** methodcachesizeexp is the size of the of the method cache ",
default=11),
@@ -266,22 +231,10 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("getattributeshortcut",
- "track types that override __getattribute__",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
BoolOption("newshortcut",
"cache and shortcut calling __new__ from builtin types",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
+ default=False),
- BoolOption("withidentitydict",
- "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
]),
])
@@ -297,15 +250,10 @@
"""
# all the good optimizations for PyPy should be listed here
if level in ['2', '3', 'jit']:
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withmethodcache=True)
- config.objspace.std.suggest(withprebuiltchar=True)
config.objspace.std.suggest(intshortcut=True)
config.objspace.std.suggest(optimized_list_getitem=True)
- config.objspace.std.suggest(getattributeshortcut=True)
#config.objspace.std.suggest(newshortcut=True)
config.objspace.std.suggest(withspecialisedtuple=True)
- config.objspace.std.suggest(withidentitydict=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
@@ -318,16 +266,13 @@
# memory-saving optimizations
if level == 'mem':
config.objspace.std.suggest(withprebuiltint=True)
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withprebuiltchar=True)
- config.objspace.std.suggest(withmapdict=True)
+ config.objspace.std.suggest(withliststrategies=True)
if not IS_64_BITS:
config.objspace.std.suggest(withsmalllong=True)
# extra optimizations with the JIT
if level == 'jit':
config.objspace.std.suggest(withcelldict=True)
- config.objspace.std.suggest(withmapdict=True)
def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
assert conf.objspace.usemodules.gc
- conf.objspace.std.withmapdict = True
- assert conf.objspace.std.withtypeversion
- conf = get_pypy_config()
- conf.objspace.std.withtypeversion = False
- py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
def test_conflicting_gcrootfinder():
conf = get_pypy_config()
conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
def test_set_pypy_opt_level():
conf = get_pypy_config()
set_pypy_opt_level(conf, '2')
- assert conf.objspace.std.getattributeshortcut
+ assert conf.objspace.std.intshortcut
conf = get_pypy_config()
set_pypy_opt_level(conf, '0')
- assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
- conf = get_pypy_config()
- conf.translation.rweakref = False
- set_pypy_opt_level(conf, '3')
-
- assert not conf.objspace.std.withtypeversion
- assert not conf.objspace.std.withmethodcache
+ assert not conf.objspace.std.intshortcut
def test_check_documentation():
def check_file_exists(fn):
diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst
--- a/pypy/doc/__pypy__-module.rst
+++ b/pypy/doc/__pypy__-module.rst
@@ -18,6 +18,7 @@
- ``bytebuffer(length)``: return a new read-write buffer of the given length.
It works like a simplified array of characters (actually, depending on the
configuration the ``array`` module internally uses this).
+ - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation).
Transparent Proxy Functionality
@@ -37,4 +38,3 @@
--------------------------------------------------------
- ``isfake(obj)``: returns True if ``obj`` is faked.
- - ``interp_pdb()``: start a pdb at interpreter-level.
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -102,15 +102,15 @@
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
- tk-dev
+ tk-dev libgc-dev
For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
On Fedora::
- yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
- lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
- (XXX plus the Febora version of libgdbm-dev and tk-dev)
+ dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+ gdbm-devel
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withrangelist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Enable "range list" objects. They are an additional implementation of the Python
-``list`` type, indistinguishable for the normal user. Whenever the ``range``
-builtin is called, an range list is returned. As long as this list is not
-mutated (and for example only iterated over), it uses only enough memory to
-store the start, stop and step of the range. This makes using ``range`` as
-efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
-
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -81,13 +81,13 @@
Simon Burton
Martin Matusiak
Konstantin Lopuhin
+ Stefano Rivera
Wenzhu Man
John Witulski
Laurence Tratt
Ivan Sichmann Freitas
Greg Price
Dario Bertini
- Stefano Rivera
Mark Pearse
Simon Cross
Andreas Stührk
@@ -95,9 +95,10 @@
Jean-Philippe St. Pierre
Guido van Rossum
Pavel Vinogradov
+ Spenser Bauman
Jeremy Thurgood
Paweł Piotr Przeradowski
- Spenser Bauman
+ Tobias Pape
Paul deGrandis
Ilya Osadchiy
marky1991
@@ -109,7 +110,7 @@
Georg Brandl
Bert Freudenberg
Stian Andreassen
- Tobias Pape
+ Mark Young
Wanja Saatkamp
Gerald Klix
Mike Blume
@@ -140,9 +141,9 @@
Yichao Yu
Rocco Moretti
Gintautas Miliauskas
+ Devin Jeanpierre
Michael Twomey
Lucian Branescu Mihaila
- Devin Jeanpierre
Gabriel Lavoie
Olivier Dormond
Jared Grubb
@@ -153,6 +154,7 @@
Victor Stinner
Andrews Medina
anatoly techtonik
+ Sergey Matyunin
Stuart Williams
Jasper Schulz
Christian Hudon
@@ -187,7 +189,6 @@
Arjun Naik
Valentina Mukhamedzhanova
Stefano Parmesan
- Mark Young
Alexis Daboville
Jens-Uwe Mager
Carl Meyer
@@ -195,7 +196,9 @@
Pieter Zieschang
Gabriel
Lukas Vacek
+ Kunal Grover
Andrew Dalke
+ Florin Papa
Sylvain Thenault
Jakub Stasiak
Nathan Taylor
@@ -210,7 +213,6 @@
Kristjan Valur Jonsson
David Lievens
Neil Blakey-Milner
- Sergey Matyunin
Lutz Paelike
Lucio Torre
Lars Wassermann
@@ -222,9 +224,11 @@
Artur Lisiecki
Sergey Kishchenko
Ignas Mikalajunas
+ Alecsandru Patrascu
Christoph Gerum
Martin Blais
Lene Wagner
+ Catalin Gabriel Manciu
Tomo Cocoa
Kim Jin Su
Toni Mattis
@@ -261,6 +265,7 @@
Akira Li
Gustavo Niemeyer
Stephan Busemann
+ florinpapa
Rafał Gałczyński
Matt Bogosian
Christian Muirhead
@@ -275,6 +280,7 @@
Boglarka Vezer
Chris Pressey
Buck Golemon
+ Diana Popa
Konrad Delong
Dinu Gherman
Chris Lambacher
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
The work on the cling backend has so far been done only for CPython, but
bringing it to PyPy is a lot less work than developing it in the first place.
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
.. _llvm: http://llvm.org/
.. _clang: http://clang.llvm.org/
@@ -283,7 +283,8 @@
core reflection set, but for the moment assume we want to have it in the
reflection library that we are building for this example.
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries"),
which is a simple XML file specifying, either explicitly or by using a
pattern, which classes, variables, namespaces, etc. to select from the given
header file.
@@ -305,7 +306,7 @@
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
Now the reflection info can be generated and compiled::
@@ -811,7 +812,7 @@
immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
variable.
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
There are a couple of minor differences between PyCintex and cppyy, most to do
with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts `
+:source:`pypy/goal/` our main PyPy-translation scripts
live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,127 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``. To
+make it possible, the RPython interface is now the following one (from
+May 2016):
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``. These are called
+ immediately by the GC when the last reference to the object goes
+ away, like in CPython. However, the long-term goal is that all
+ ``__del__()`` methods should only contain simple enough code. If
+ they do, we call them "destructors". They can't use operations that
+ would resurrect the object, for example. Use the decorator
+ ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` that are not passing the destructor test
+ are supported for backward compatibility, but deprecated. The rest
+ of this document assumes that ``__del__()`` are all destructors.
+
+* For any more advanced usage --- in particular for any app-level
+ object with a __del__ --- we don't use the RPython-level
+ ``__del__()`` method. Instead we use
+ ``rgc.FinalizerController.register_finalizer()``. This allows us to
+ attach a finalizer method to the object, giving more control over
+ the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer. A finalizer runs earlier, and in topological
+order; care must be taken that the object might still be reachable at
+this point if we're clever enough. A destructor on the other hand runs
+last; nothing can be done with the object any more, and the GC frees it
+immediately.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when it is about to free the memory. Intended for objects
+that just need to free an extra block of raw memory.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it. These restrictions are
+checked. In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
+
+Destructors are called precisely when the GC frees the memory of the
+object. As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerQueue`` class:
+
+* You must give a class-level attribute ``base_class``, which is the
+ base class of all instances with a finalizer. (If you need
+ finalizers on several unrelated classes, you need several unrelated
+ ``FinalizerQueue`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``. At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer. Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and marks them as
+reachable again, as well as all objects they depend on. It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance. Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes). If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly. It
+returns the next queued item, or ``None`` when the queue is empty.
+
+In theory, it would kind of work if you accumulate several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues. This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
+
From pypy.commits at gmail.com Wed May 11 14:22:05 2016
From: pypy.commits at gmail.com (rlamy)
Date: Wed, 11 May 2016 11:22:05 -0700 (PDT)
Subject: [pypy-commit] pypy default: turn make_wrapper() into a method of
ApiFunction
Message-ID: <5733784d.882cc20a.65186.4100@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84383:51732368583f
Date: 2016-05-11 19:21 +0100
http://bitbucket.org/pypy/pypy/changeset/51732368583f/
Log: turn make_wrapper() into a method of ApiFunction
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -203,46 +203,46 @@
# id. Invariant: this variable always contain 0 when the PyPy GIL is
# released. It should also contain 0 when regular RPython code
# executes. In non-cpyext-related code, it will thus always be 0.
-#
+#
# **make_generic_cpy_call():** RPython to C, with the GIL held. Before
# the call, must assert that the global variable is 0 and set the
# current thread identifier into the global variable. After the call,
# assert that the global variable still contains the current thread id,
# and reset it to 0.
-#
+#
# **make_wrapper():** C to RPython; by default assume that the GIL is
# held, but accepts gil="acquire", "release", "around",
# "pygilstate_ensure", "pygilstate_release".
-#
+#
# When a wrapper() is called:
-#
+#
# * "acquire": assert that the GIL is not currently held, i.e. the
# global variable does not contain the current thread id (otherwise,
# deadlock!). Acquire the PyPy GIL. After we acquired it, assert
# that the global variable is 0 (it must be 0 according to the
# invariant that it was 0 immediately before we acquired the GIL,
# because the GIL was released at that point).
-#
+#
# * gil=None: we hold the GIL already. Assert that the current thread
# identifier is in the global variable, and replace it with 0.
-#
+#
# * "pygilstate_ensure": if the global variable contains the current
# thread id, replace it with 0 and set the extra arg to 0. Otherwise,
# do the "acquire" and set the extra arg to 1. Then we'll call
# pystate.py:PyGILState_Ensure() with this extra arg, which will do
# the rest of the logic.
-#
+#
# When a wrapper() returns, first assert that the global variable is
# still 0, and then:
-#
+#
# * "release": release the PyPy GIL. The global variable was 0 up to
# and including at the point where we released the GIL, but afterwards
# it is possible that the GIL is acquired by a different thread very
# quickly.
-#
+#
# * gil=None: we keep holding the GIL. Set the current thread
# identifier into the global variable.
-#
+#
# * "pygilstate_release": if the argument is PyGILState_UNLOCKED,
# release the PyPy GIL; otherwise, set the current thread identifier
# into the global variable. The rest of the logic of
@@ -254,7 +254,7 @@
cpyext_namespace = NameManager('cpyext_')
-class ApiFunction:
+class ApiFunction(object):
def __init__(self, argtypes, restype, callable, error=_NOT_SPECIFIED,
c_name=None, gil=None, result_borrowed=False, result_is_ll=False):
self.argtypes = argtypes
@@ -292,13 +292,61 @@
def get_wrapper(self, space):
wrapper = getattr(self, '_wrapper', None)
if wrapper is None:
- wrapper = make_wrapper(space, self.callable, self.gil)
+ wrapper = self._make_wrapper(space)
self._wrapper = wrapper
wrapper.relax_sig_check = True
if self.c_name is not None:
wrapper.c_name = cpyext_namespace.uniquename(self.c_name)
return wrapper
+ # Make the wrapper for the cases (1) and (2)
+ def _make_wrapper(self, space):
+ "NOT_RPYTHON"
+ # This logic is obscure, because we try to avoid creating one
+ # big wrapper() function for every callable. Instead we create
+ # only one per "signature".
+
+ callable = self.callable
+ gil = self.gil
+ argnames = self.argnames
+ argtypesw = zip(self.argtypes,
+ [_name.startswith("w_") for _name in argnames])
+ error_value = getattr(self, "error_value", CANNOT_FAIL)
+ if (isinstance(self.restype, lltype.Ptr)
+ and error_value is not CANNOT_FAIL):
+ assert lltype.typeOf(error_value) == self.restype
+ assert not error_value # only support error=NULL
+ error_value = 0 # because NULL is not hashable
+
+ if self.result_is_ll:
+ result_kind = "L"
+ elif self.result_borrowed:
+ result_kind = "B" # note: 'result_borrowed' is ignored if we also
+ else: # say 'result_is_ll=True' (in this case it's
+ result_kind = "." # up to you to handle refcounting anyway)
+
+ signature = (tuple(argtypesw),
+ self.restype,
+ result_kind,
+ error_value,
+ gil)
+
+ cache = space.fromcache(WrapperCache)
+ cache.stats[1] += 1
+ try:
+ wrapper_gen = cache.wrapper_gens[signature]
+ except KeyError:
+ #print signature
+ wrapper_gen = cache.wrapper_gens[signature] = WrapperGen(space,
+ signature)
+ cache.stats[0] += 1
+ #print 'Wrapper cache [wrappers/total]:', cache.stats
+ wrapper = wrapper_gen.make_wrapper(callable)
+ wrapper.relax_sig_check = True
+ if self.c_name is not None:
+ wrapper.c_name = cpyext_namespace.uniquename(self.c_name)
+ return wrapper
+
DEFAULT_HEADER = 'pypy_decl.h'
def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER,
gil=None, result_borrowed=False, result_is_ll=False):
@@ -709,48 +757,6 @@
return wrapper
-# Make the wrapper for the cases (1) and (2)
-def make_wrapper(space, callable, gil=None):
- "NOT_RPYTHON"
- # This logic is obscure, because we try to avoid creating one
- # big wrapper() function for every callable. Instead we create
- # only one per "signature".
-
- argnames = callable.api_func.argnames
- argtypesw = zip(callable.api_func.argtypes,
- [_name.startswith("w_") for _name in argnames])
- error_value = getattr(callable.api_func, "error_value", CANNOT_FAIL)
- if (isinstance(callable.api_func.restype, lltype.Ptr)
- and error_value is not CANNOT_FAIL):
- assert lltype.typeOf(error_value) == callable.api_func.restype
- assert not error_value # only support error=NULL
- error_value = 0 # because NULL is not hashable
-
- if callable.api_func.result_is_ll:
- result_kind = "L"
- elif callable.api_func.result_borrowed:
- result_kind = "B" # note: 'result_borrowed' is ignored if we also
- else: # say 'result_is_ll=True' (in this case it's
- result_kind = "." # up to you to handle refcounting anyway)
-
- signature = (tuple(argtypesw),
- callable.api_func.restype,
- result_kind,
- error_value,
- gil)
-
- cache = space.fromcache(WrapperCache)
- cache.stats[1] += 1
- try:
- wrapper_gen = cache.wrapper_gens[signature]
- except KeyError:
- #print signature
- wrapper_gen = cache.wrapper_gens[signature] = WrapperGen(space,
- signature)
- cache.stats[0] += 1
- #print 'Wrapper cache [wrappers/total]:', cache.stats
- return wrapper_gen.make_wrapper(callable)
-
@dont_inline
def deadlock_error(funcname):
@@ -1019,7 +1025,7 @@
structindex = {}
for header, header_functions in FUNCTIONS_BY_HEADER.iteritems():
for name, func in header_functions.iteritems():
- if not func:
+ if not func:
# added only for the macro, not the decl
continue
restype, args = c_function_signature(db, func)
@@ -1033,7 +1039,7 @@
RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI;
""" % dict(members=structmembers)
- functions = generate_decls_and_callbacks(db, export_symbols,
+ functions = generate_decls_and_callbacks(db, export_symbols,
prefix='cpyexttest')
global_objects = []
@@ -1415,7 +1421,7 @@
generate_macros(export_symbols, prefix=prefix)
- functions = generate_decls_and_callbacks(db, [], api_struct=False,
+ functions = generate_decls_and_callbacks(db, [], api_struct=False,
prefix=prefix)
code = "#include \n"
if use_micronumpy:
@@ -1471,7 +1477,7 @@
if not func:
continue
newname = mangle_name('PyPy', name) or name
- deco = entrypoint_lowlevel("cpyext", func.argtypes, newname,
+ deco = entrypoint_lowlevel("cpyext", func.argtypes, newname,
relax=True)
deco(func.get_wrapper(space))
diff --git a/pypy/module/cpyext/test/test_translate.py b/pypy/module/cpyext/test/test_translate.py
--- a/pypy/module/cpyext/test/test_translate.py
+++ b/pypy/module/cpyext/test/test_translate.py
@@ -11,11 +11,11 @@
FT = lltype.FuncType([], lltype.Signed)
FTPTR = lltype.Ptr(FT)
- def make_wrapper(space, func, gil=None):
+ def make_wrapper(self, space):
def wrapper():
- return func(space)
+ return self.callable(space)
return wrapper
- monkeypatch.setattr(pypy.module.cpyext.api, 'make_wrapper', make_wrapper)
+ monkeypatch.setattr(pypy.module.cpyext.api.ApiFunction, '_make_wrapper', make_wrapper)
@specialize.memo()
def get_tp_function(space, typedef):
From pypy.commits at gmail.com Wed May 11 14:22:44 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 11:22:44 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: in-progress,
hypothesis testing of the bitmask encoding
Message-ID: <57337874.4106c20a.ef9a8.409e@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84384:56e5c4403abf
Date: 2016-05-11 20:22 +0200
http://bitbucket.org/pypy/pypy/changeset/56e5c4403abf/
Log: in-progress, hypothesis testing of the bitmask encoding
diff --git a/rpython/memory/gctransform/shadowcolor.py b/rpython/memory/gctransform/shadowcolor.py
--- a/rpython/memory/gctransform/shadowcolor.py
+++ b/rpython/memory/gctransform/shadowcolor.py
@@ -1,5 +1,7 @@
-from rpython.flowspace.model import mkentrymap, Variable
+from rpython.rtyper.lltypesystem import lltype, llmemory
+from rpython.flowspace.model import mkentrymap, Variable, Constant
from rpython.tool.algo.regalloc import perform_register_allocation
+from rpython.translator.unsimplify import varoftype
def is_trivial_rewrite(op):
@@ -77,6 +79,8 @@
for v in op.args:
assert v in interesting_vars # must be pushed just above
pending_succ.append((block, v))
+ if not interesting_vars:
+ return None
# If there is a path from a gc_pop_roots(v) to a subsequent
# gc_push_roots(w) where w contains the same value as v along that
@@ -96,36 +100,115 @@
def allocate_registers(graph):
interesting_vars = find_interesting_variables(graph)
+ if not interesting_vars:
+ return None
regalloc = perform_register_allocation(graph, interesting_vars.__contains__)
+ regalloc.find_num_colors()
return regalloc
-def move_pushes_earlier(graph):
+def _gc_save_root(index, var):
+ c_index = Constant(index, lltype.Signed)
+ return SpaceOperation('gc_save_root', [c_index, var],
+ varoftype(lltype.Void))
+
+c_NULL = Constant(lltype.nullptr(llmemory.GCREF.TO), llmemory.GCREF)
+
+def make_bitmask(filled):
+ n = filled.count(False)
+ if n == 0:
+ return (None, None)
+ if n == 1:
+ return (filled.index(False), c_NULL)
+ bitmask = 0
+ last_index = 0
+ for i in range(len(filled)):
+ if not filled[i]:
+ bitmask <<= (i - last_index)
+ last_index = i
+ bitmask |= 1
+ return (last_index, Constant(bitmask, lltype.Signed))
+
+
+def expand_push_roots(graph, regalloc):
+ """Expand gc_push_roots into a series of gc_save_root, including
+ writing a bitmask tag to mark some entries as not-in-use
+ """
+ for block in graph.iterblocks():
+ any_change = False
+ newops = []
+ for op in block.operations:
+ if op.opname == 'gc_push_roots':
+ if regalloc is None:
+ assert len(op.args) == 0
+ else:
+ filled = [False] * regalloc.numcolors
+ for v in op.args:
+ index = regalloc.getcolor(v)
+ assert not filled[index]
+ filled[index] = True
+ newops.append(_gc_save_root(index, v))
+ bitmask_index, bitmask_v = make_bitmask(filled)
+ if bitmask_index is not None:
+ newops.append(_gc_save_root(bitmask_index, bitmask_v))
+ any_change = True
+ else:
+ newops.append(op)
+ if any_change:
+ block.operations = newops
+
+
+def move_pushes_earlier(graph, regalloc):
"""gc_push_roots and gc_pop_roots are pushes/pops to the shadowstack,
immediately enclosing the operation that needs them (typically a call).
Here, we try to move individual pushes earlier, in fact as early as
possible under the following conditions: we only move it across vars
that are 'interesting_vars'; and we stop when we encounter the
- operation that produces the value, or when we encounter a gc_pop_roots
- that pops off the same stack location. In the latter case, if that
- gc_pop_roots pops the same value out of the same stack location, then
- success: we can remove the gc_push_root on that path.
+ operation that produces the value, or when we encounter a gc_pop_roots.
+ In the latter case, if that gc_pop_roots pops the same value out of the
+ same stack location, then success: we can remove the gc_push_root on
+ that path.
If the process succeeds to remove the gc_push_root along at least
one path, we generate it explicitly on the other paths, and we
remove the original gc_push_root. If the process doesn't succeed
in doing any such removal, we don't do anything.
+ """
+ # Concrete example (assembler tested on x86-64 gcc 5.3 and clang 3.7):
+ #
+ # ----original---- ----move_pushes_earlier----
+ #
+ # while (a > 10) { *foo = b;
+ # *foo = b; while (a > 10) {
+ # a = g(a); a = g(a);
+ # b = *foo; b = *foo;
+ # // *foo = b;
+ # } }
+ # return b; return b;
+ #
+ # => the store and the => the store is before, and gcc/clang
+ # load are in the loop, moves the load after the loop
+ # even in the assembler (the commented-out '*foo=b' is removed
+ # by this function, but gcc/clang would
+ # also remove it)
- Note that it would be possible to do exactly the same in the
- opposite direction by exchanging the roles of "push/earlier" and
- "pop/later". I think doing both is pointless---one direction is
- enough. The direction we chose here keeps gc_pop_roots unmodified.
- The C compiler should be better at discarding them if unused.
- """
-
x.x.x.x
+def expand_push_pop_roots(graph):
+ xxxxxxxxx
+ for block in graph.iterblocks():
+ for op in block.operations:
+ if op.opname == 'gc_push_roots':
+ for v in op.args:
+ interesting_vars.add(v)
+ pending_pred.append((block, v))
+ elif op.opname == 'gc_pop_roots':
+ for v in op.args:
+ assert v in interesting_vars # must be pushed just above
+ pending_succ.append((block, v))
+
+
def postprocess_graph(gct, graph):
"""Collect information about the gc_push_roots and gc_pop_roots
added in this complete graph, and replace them with real operations.
diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py
--- a/rpython/memory/gctransform/shadowstack.py
+++ b/rpython/memory/gctransform/shadowstack.py
@@ -29,11 +29,14 @@
def push_roots(self, hop, keep_current_args=False):
livevars = self.get_livevars_for_roots(hop, keep_current_args)
self.num_pushs += len(livevars)
- hop.genop("gc_push_roots", livevars) # even if len(livevars) == 0
+ if livevars:
+ hop.genop("gc_push_roots", livevars)
return livevars
def pop_roots(self, hop, livevars):
- hop.genop("gc_pop_roots", list(livevars)) # even if len(livevars) == 0
+ hop.genop("gc_pop_roots", livevars)
+ # NB. we emit it even if len(livevars) == 0; this is needed for
+ # shadowcolor.move_pushes_earlier()
class ShadowStackRootWalker(BaseRootWalker):
diff --git a/rpython/memory/gctransform/test/test_shadowcolor.py b/rpython/memory/gctransform/test/test_shadowcolor.py
--- a/rpython/memory/gctransform/test/test_shadowcolor.py
+++ b/rpython/memory/gctransform/test/test_shadowcolor.py
@@ -3,6 +3,7 @@
from rpython.rtyper.test.test_llinterp import gengraph
from rpython.conftest import option
from rpython.memory.gctransform.shadowcolor import *
+from hypothesis import given, strategies
def make_graph(f, argtypes):
@@ -242,3 +243,25 @@
graph = make_graph(f, [llmemory.GCREF, int, llmemory.GCREF])
regalloc = allocate_registers(graph)
assert summary_regalloc(regalloc) == [('a', 1)] * 2 + [('c', 0)] * 2
+
+ at given(strategies.lists(strategies.booleans()))
+def test_make_bitmask(boollist):
+ index, c = make_bitmask(boollist)
+ if index is None:
+ assert c is None
+ else:
+ assert 0 <= index < len(boollist)
+ assert boollist[index] == False
+ if c == c_NULL:
+ bitmask = 1
+ else:
+ assert c.concretetype == lltype.Signed
+ bitmask = c.value
+ while bitmask:
+ if bitmask & 1:
+ assert index >= 0
+ assert boollist[index] == False
+ boollist[index] = True
+ bitmask >>= 1
+ index -= 1
+ assert boollist == [True] * len(boollist)
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -513,8 +513,12 @@
'gc_rawrefcount_from_obj': LLOp(sideeffects=False),
'gc_rawrefcount_to_obj': LLOp(sideeffects=False),
- 'gc_push_roots' : LLOp(),
- 'gc_pop_roots' : LLOp(),
+ 'gc_push_roots' : LLOp(), # temporary: list of roots to save
+ 'gc_pop_roots' : LLOp(), # temporary: list of roots to restore
+ 'gc_enter_roots_frame' : LLOp(), # reserve N entries, save local frame pos
+ 'gc_leave_roots_frame' : LLOp(), # restore shadowstack ptr from saved pos
+ 'gc_save_root' : LLOp(), # save value Y in shadowstack pos X
+ 'gc_restore_root' : LLOp(), # restore value Y from shadowstack pos X
# ------- JIT & GC interaction, only for some GCs ----------
diff --git a/rpython/tool/algo/regalloc.py b/rpython/tool/algo/regalloc.py
--- a/rpython/tool/algo/regalloc.py
+++ b/rpython/tool/algo/regalloc.py
@@ -117,6 +117,13 @@
for v in block.getvariables():
print '\t', v, '\t', self.getcolor(v)
+ def find_num_colors(self):
+ if self._coloring:
+ numcolors = max(self._coloring.values()) + 1
+ else:
+ numcolors = 0
+ self.numcolors = numcolors
+
def getcolor(self, v):
return self._coloring[self._unionfind.find_rep(v)]
From pypy.commits at gmail.com Wed May 11 14:36:27 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 11:36:27 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: Test
Message-ID: <57337bab.e9f1c20a.7cf08.ffff8b5e@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84385:09e14faba25a
Date: 2016-05-11 20:36 +0200
http://bitbucket.org/pypy/pypy/changeset/09e14faba25a/
Log: Test
diff --git a/rpython/memory/gctransform/shadowcolor.py b/rpython/memory/gctransform/shadowcolor.py
--- a/rpython/memory/gctransform/shadowcolor.py
+++ b/rpython/memory/gctransform/shadowcolor.py
@@ -1,5 +1,6 @@
from rpython.rtyper.lltypesystem import lltype, llmemory
-from rpython.flowspace.model import mkentrymap, Variable, Constant
+from rpython.flowspace.model import mkentrymap
+from rpython.flowspace.model import Variable, Constant, SpaceOperation
from rpython.tool.algo.regalloc import perform_register_allocation
from rpython.translator.unsimplify import varoftype
@@ -130,6 +131,21 @@
return (last_index, Constant(bitmask, lltype.Signed))
+def expand_one_push_roots(regalloc, args):
+ if regalloc is None:
+ assert len(args) == 0
+ else:
+ filled = [False] * regalloc.numcolors
+ for v in args:
+ index = regalloc.getcolor(v)
+ assert not filled[index]
+ filled[index] = True
+ yield _gc_save_root(index, v)
+ bitmask_index, bitmask_v = make_bitmask(filled)
+ if bitmask_index is not None:
+ yield _gc_save_root(bitmask_index, bitmask_v)
+
+
def expand_push_roots(graph, regalloc):
"""Expand gc_push_roots into a series of gc_save_root, including
writing a bitmask tag to mark some entries as not-in-use
@@ -139,18 +155,7 @@
newops = []
for op in block.operations:
if op.opname == 'gc_push_roots':
- if regalloc is None:
- assert len(op.args) == 0
- else:
- filled = [False] * regalloc.numcolors
- for v in op.args:
- index = regalloc.getcolor(v)
- assert not filled[index]
- filled[index] = True
- newops.append(_gc_save_root(index, v))
- bitmask_index, bitmask_v = make_bitmask(filled)
- if bitmask_index is not None:
- newops.append(_gc_save_root(bitmask_index, bitmask_v))
+ newops += expand_one_push_roots(regalloc, op)
any_change = True
else:
newops.append(op)
diff --git a/rpython/memory/gctransform/test/test_shadowcolor.py b/rpython/memory/gctransform/test/test_shadowcolor.py
--- a/rpython/memory/gctransform/test/test_shadowcolor.py
+++ b/rpython/memory/gctransform/test/test_shadowcolor.py
@@ -265,3 +265,30 @@
bitmask >>= 1
index -= 1
assert boollist == [True] * len(boollist)
+
+
+class FakeRegAlloc:
+ def __init__(self, **colors):
+ self.numcolors = len(colors)
+ self.getcolor = colors.__getitem__
+
+def check_expand_one_push_roots(regalloc, args):
+ got = list(expand_one_push_roots(regalloc, args))
+ result = []
+ for spaceop in got:
+ assert spaceop.opname == 'gc_save_root'
+ result.append((spaceop.args[0].value, spaceop.args[1]))
+ return result
+
+def test_expand_one_push_roots():
+ regalloc = FakeRegAlloc(a=0, b=1, c=2)
+ assert check_expand_one_push_roots(regalloc, ['a', 'b', 'c']) == [
+ (0, 'a'), (1, 'b'), (2, 'c')]
+ assert check_expand_one_push_roots(regalloc, ['a', 'c']) == [
+ (0, 'a'), (2, 'c'), (1, c_NULL)]
+ assert check_expand_one_push_roots(regalloc, ['b']) == [
+ (1, 'b'), (2, Constant(0x5, lltype.Signed))]
+ assert check_expand_one_push_roots(regalloc, ['a']) == [
+ (0, 'a'), (2, Constant(0x3, lltype.Signed))]
+ assert check_expand_one_push_roots(regalloc, []) == [
+ (2, Constant(0x7, lltype.Signed))]
From pypy.commits at gmail.com Wed May 11 14:42:30 2016
From: pypy.commits at gmail.com (rlamy)
Date: Wed, 11 May 2016 11:42:30 -0700 (PDT)
Subject: [pypy-commit] pypy default: Clean up code and debugging artifacts
Message-ID: <57337d16.e9f1c20a.7cf08.ffff8d53@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r84386:63ca6c7d90a0
Date: 2016-05-11 19:41 +0100
http://bitbucket.org/pypy/pypy/changeset/63ca6c7d90a0/
Log: Clean up code and debugging artifacts
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -292,11 +292,7 @@
def get_wrapper(self, space):
wrapper = getattr(self, '_wrapper', None)
if wrapper is None:
- wrapper = self._make_wrapper(space)
- self._wrapper = wrapper
- wrapper.relax_sig_check = True
- if self.c_name is not None:
- wrapper.c_name = cpyext_namespace.uniquename(self.c_name)
+ wrapper = self._wrapper = self._make_wrapper(space)
return wrapper
# Make the wrapper for the cases (1) and (2)
@@ -306,11 +302,8 @@
# big wrapper() function for every callable. Instead we create
# only one per "signature".
- callable = self.callable
- gil = self.gil
- argnames = self.argnames
argtypesw = zip(self.argtypes,
- [_name.startswith("w_") for _name in argnames])
+ [_name.startswith("w_") for _name in self.argnames])
error_value = getattr(self, "error_value", CANNOT_FAIL)
if (isinstance(self.restype, lltype.Ptr)
and error_value is not CANNOT_FAIL):
@@ -329,19 +322,15 @@
self.restype,
result_kind,
error_value,
- gil)
+ self.gil)
cache = space.fromcache(WrapperCache)
- cache.stats[1] += 1
try:
wrapper_gen = cache.wrapper_gens[signature]
except KeyError:
- #print signature
- wrapper_gen = cache.wrapper_gens[signature] = WrapperGen(space,
- signature)
- cache.stats[0] += 1
- #print 'Wrapper cache [wrappers/total]:', cache.stats
- wrapper = wrapper_gen.make_wrapper(callable)
+ wrapper_gen = WrapperGen(space, signature)
+ cache.wrapper_gens[signature] = wrapper_gen
+ wrapper = wrapper_gen.make_wrapper(self.callable)
wrapper.relax_sig_check = True
if self.c_name is not None:
wrapper.c_name = cpyext_namespace.uniquename(self.c_name)
@@ -731,7 +720,6 @@
def __init__(self, space):
self.space = space
self.wrapper_gens = {} # {signature: WrapperGen()}
- self.stats = [0, 0]
class WrapperGen(object):
wrapper_second_level = None
From pypy.commits at gmail.com Wed May 11 14:49:23 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 11 May 2016 11:49:23 -0700 (PDT)
Subject: [pypy-commit] pypy shadowstack-perf-2: expand_pop_roots
Message-ID: <57337eb3.923f1c0a.5b0e0.ffff8394@mx.google.com>
Author: Armin Rigo
Branch: shadowstack-perf-2
Changeset: r84387:5b47be0086d7
Date: 2016-05-11 20:49 +0200
http://bitbucket.org/pypy/pypy/changeset/5b47be0086d7/
Log: expand_pop_roots
diff --git a/rpython/memory/gctransform/shadowcolor.py b/rpython/memory/gctransform/shadowcolor.py
--- a/rpython/memory/gctransform/shadowcolor.py
+++ b/rpython/memory/gctransform/shadowcolor.py
@@ -113,6 +113,11 @@
return SpaceOperation('gc_save_root', [c_index, var],
varoftype(lltype.Void))
+def _gc_restore_root(index, var):
+ c_index = Constant(index, lltype.Signed)
+ return SpaceOperation('gc_restore_root', [c_index, var],
+ varoftype(lltype.Void))
+
c_NULL = Constant(lltype.nullptr(llmemory.GCREF.TO), llmemory.GCREF)
def make_bitmask(filled):
@@ -128,6 +133,7 @@
bitmask <<= (i - last_index)
last_index = i
bitmask |= 1
+ assert bitmask & 1
return (last_index, Constant(bitmask, lltype.Signed))
@@ -141,14 +147,23 @@
assert not filled[index]
filled[index] = True
yield _gc_save_root(index, v)
- bitmask_index, bitmask_v = make_bitmask(filled)
+ bitmask_index, bitmask_c = make_bitmask(filled)
if bitmask_index is not None:
- yield _gc_save_root(bitmask_index, bitmask_v)
+ yield _gc_save_root(bitmask_index, bitmask_c)
+
+def expand_one_pop_roots(regalloc, args):
+ if regalloc is None:
+ assert len(args) == 0
+ else:
+ for v in args:
+ index = regalloc.getcolor(v)
+ yield _gc_restore_root(index, v)
def expand_push_roots(graph, regalloc):
"""Expand gc_push_roots into a series of gc_save_root, including
- writing a bitmask tag to mark some entries as not-in-use
+ writing a bitmask tag to mark some entries as not-in-use.
+ (If regalloc is None, it will still remove empty gc_push_roots.)
"""
for block in graph.iterblocks():
any_change = False
@@ -200,18 +215,22 @@
x.x.x.x
-def expand_push_pop_roots(graph):
- xxxxxxxxx
+def expand_pop_roots(graph):
+ """gc_pop_roots => series of gc_restore_root; this is done after
+ move_pushes_earlier() because that one doesn't work correctly if
+ a completely-empty gc_pop_roots is removed.
+ """
for block in graph.iterblocks():
+ any_change = False
+ newops = []
for op in block.operations:
- if op.opname == 'gc_push_roots':
- for v in op.args:
- interesting_vars.add(v)
- pending_pred.append((block, v))
- elif op.opname == 'gc_pop_roots':
- for v in op.args:
- assert v in interesting_vars # must be pushed just above
- pending_succ.append((block, v))
+ if op.opname == 'gc_pop_roots':
+ newops += expand_one_pop_roots(regalloc, op)
+ any_change = True
+ else:
+ newops.append(op)
+ if any_change:
+ block.operations = newops
def postprocess_graph(gct, graph):
@@ -219,4 +238,7 @@
added in this complete graph, and replace them with real operations.
"""
regalloc = allocate_registers(graph)
+ expand_push_roots(graph, regalloc)
+ move_pushes_earlier(graph, regalloc)
+ expand_pop_roots(graph, regalloc)
xxxx
diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py
--- a/rpython/memory/gctransform/shadowstack.py
+++ b/rpython/memory/gctransform/shadowstack.py
@@ -29,8 +29,7 @@
def push_roots(self, hop, keep_current_args=False):
livevars = self.get_livevars_for_roots(hop, keep_current_args)
self.num_pushs += len(livevars)
- if livevars:
- hop.genop("gc_push_roots", livevars)
+ hop.genop("gc_push_roots", livevars)
return livevars
def pop_roots(self, hop, livevars):
diff --git a/rpython/memory/gctransform/test/test_shadowcolor.py b/rpython/memory/gctransform/test/test_shadowcolor.py
--- a/rpython/memory/gctransform/test/test_shadowcolor.py
+++ b/rpython/memory/gctransform/test/test_shadowcolor.py
@@ -268,27 +268,44 @@
class FakeRegAlloc:
- def __init__(self, **colors):
+ def __init__(self, expected_op, **colors):
+ self.expected_op = expected_op
self.numcolors = len(colors)
self.getcolor = colors.__getitem__
-def check_expand_one_push_roots(regalloc, args):
- got = list(expand_one_push_roots(regalloc, args))
- result = []
- for spaceop in got:
- assert spaceop.opname == 'gc_save_root'
- result.append((spaceop.args[0].value, spaceop.args[1]))
- return result
+ def check(self, got):
+ got = list(got)
+ result = []
+ for spaceop in got:
+ assert spaceop.opname == self.expected_op
+ result.append((spaceop.args[0].value, spaceop.args[1]))
+ return result
def test_expand_one_push_roots():
- regalloc = FakeRegAlloc(a=0, b=1, c=2)
- assert check_expand_one_push_roots(regalloc, ['a', 'b', 'c']) == [
+ regalloc = FakeRegAlloc('gc_save_root', a=0, b=1, c=2)
+ assert regalloc.check(expand_one_push_roots(regalloc, ['a', 'b', 'c'])) == [
(0, 'a'), (1, 'b'), (2, 'c')]
- assert check_expand_one_push_roots(regalloc, ['a', 'c']) == [
+ assert regalloc.check(expand_one_push_roots(regalloc, ['a', 'c'])) == [
(0, 'a'), (2, 'c'), (1, c_NULL)]
- assert check_expand_one_push_roots(regalloc, ['b']) == [
+ assert regalloc.check(expand_one_push_roots(regalloc, ['b'])) == [
(1, 'b'), (2, Constant(0x5, lltype.Signed))]
- assert check_expand_one_push_roots(regalloc, ['a']) == [
+ assert regalloc.check(expand_one_push_roots(regalloc, ['a'])) == [
(0, 'a'), (2, Constant(0x3, lltype.Signed))]
- assert check_expand_one_push_roots(regalloc, []) == [
+ assert regalloc.check(expand_one_push_roots(regalloc, [])) == [
(2, Constant(0x7, lltype.Signed))]
+
+ assert list(expand_one_push_roots(None, [])) == []
+
+def test_expand_one_pop_roots():
+ regalloc = FakeRegAlloc('gc_restore_root', a=0, b=1, c=2)
+ assert regalloc.check(expand_one_pop_roots(regalloc, ['a', 'b', 'c'])) == [
+ (0, 'a'), (1, 'b'), (2, 'c')]
+ assert regalloc.check(expand_one_pop_roots(regalloc, ['a', 'c'])) == [
+ (0, 'a'), (2, 'c')]
+ assert regalloc.check(expand_one_pop_roots(regalloc, ['b'])) == [
+ (1, 'b')]
+ assert regalloc.check(expand_one_pop_roots(regalloc, ['a'])) == [
+ (0, 'a')]
+ assert regalloc.check(expand_one_pop_roots(regalloc, [])) == []
+
+ assert list(expand_one_pop_roots(None, [])) == []
From pypy.commits at gmail.com Wed May 11 17:09:52 2016
From: pypy.commits at gmail.com (devin.jeanpierre)
Date: Wed, 11 May 2016 14:09:52 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-macros-cast: Back out non-test changes,
since I'm going to rewrite how I make the tests pass.
Message-ID: <57339fa0.d81a1c0a.e9a03.ffffa28e@mx.google.com>
Author: Devin Jeanpierre