[pypy-commit] pypy multiphase: hg merge py3.5
rlamy
pypy.commits at gmail.com
Thu Jul 20 08:25:48 EDT 2017
Author: Ronan Lamy <ronan.lamy at gmail.com>
Branch: multiphase
Changeset: r91943:8cad295748ea
Date: 2017-07-20 14:22 +0200
http://bitbucket.org/pypy/pypy/changeset/8cad295748ea/
Log: hg merge py3.5
diff too long, truncating to 2000 out of 10996 lines
diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -1,6 +1,6 @@
syntax: glob
*.py[co]
-*.sw[po]
+*.sw[pon]
*~
.*.swp
.idea
@@ -10,6 +10,8 @@
.venv
.cache
+.cache/
+.gdb_history
syntax: regexp
^testresult$
^site-packages$
@@ -90,7 +92,6 @@
.hypothesis/
^release/
^rpython/_cache$
-^\.cache$
pypy/module/cppyy/.+/*\.pcm
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -38,3 +38,5 @@
b16a4363e930f6401bceb499b9520955504c6cb0 release-pypy3.5-v5.7.0
1aa2d8e03cdfab54b7121e93fda7e98ea88a30bf release-pypy2.7-v5.7.1
2875f328eae2216a87f3d6f335092832eb031f56 release-pypy3.5-v5.7.1
+c925e73810367cd960a32592dd7f728f436c125c release-pypy2.7-v5.8.0
+a37ecfe5f142bc971a86d17305cc5d1d70abec64 release-pypy3.5-v5.8.0
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -39,11 +39,11 @@
Armin Rigo
Maciej Fijalkowski
- Carl Friedrich Bolz
+ Carl Friedrich Bolz-Tereick
Amaury Forgeot d'Arc
Antonio Cuni
+ Matti Picus
Samuele Pedroni
- Matti Picus
Ronan Lamy
Alex Gaynor
Philip Jenvey
@@ -101,28 +101,28 @@
Vincent Legoll
Michael Foord
Stephan Diehl
+ Stefano Rivera
Stefan Schwarzer
Tomek Meka
Valentino Volonghi
- Stefano Rivera
Patrick Maupin
Devin Jeanpierre
Bob Ippolito
Bruno Gola
David Malcolm
Jean-Paul Calderone
+ Squeaky
Edd Barrett
- Squeaky
Timo Paulssen
Marius Gedminas
Alexandre Fayolle
Simon Burton
Nicolas Truessel
Martin Matusiak
+ Laurence Tratt
Wenzhu Man
Konstantin Lopuhin
John Witulski
- Laurence Tratt
Greg Price
Ivan Sichmann Freitas
Dario Bertini
@@ -149,13 +149,13 @@
Stian Andreassen
Wanja Saatkamp
Mike Blume
+ Joannah Nanjekye
Gerald Klix
Oscar Nierstrasz
Rami Chowdhury
Stefan H. Muller
- Joannah Nanjekye
+ Tim Felgentreff
Eugene Oden
- Tim Felgentreff
Jeff Terrace
Henry Mason
Vasily Kuznetsov
@@ -164,11 +164,11 @@
Dusty Phillips
Lukas Renggli
Guenter Jantzen
+ Jasper Schulz
Ned Batchelder
Amit Regmi
Anton Gulenko
Sergey Matyunin
- Jasper Schulz
Andrew Chambers
Nicolas Chauvat
Andrew Durdin
@@ -183,6 +183,7 @@
Gintautas Miliauskas
Lucian Branescu Mihaila
anatoly techtonik
+ Dodan Mihai
Karl Bartel
Gabriel Lavoie
Jared Grubb
@@ -220,12 +221,14 @@
Vaibhav Sood
Reuben Cummings
Attila Gobi
+ Alecsandru Patrascu
Christopher Pope
Tristan Arthur
Christian Tismer
Dan Stromberg
Carl Meyer
Florin Papa
+ Jens-Uwe Mager
Valentina Mukhamedzhanova
Stefano Parmesan
touilleMan
@@ -264,7 +267,6 @@
Dan Buch
Lene Wagner
Tomo Cocoa
- Alecsandru Patrascu
David Lievens
Neil Blakey-Milner
Henrik Vendelbo
@@ -303,6 +305,7 @@
Anna Katrina Dominguez
Kim Jin Su
Amber Brown
+ Nate Bragg
Ben Darnell
Juan Francisco Cantero Hurtado
Godefroid Chappelle
@@ -340,11 +343,13 @@
Jim Hunziker
shoma hosaka
Buck Golemon
+ Iraklis D.
JohnDoe
yrttyr
Michael Chermside
Anna Ravencroft
remarkablerocket
+ Petre Vijiac
Berker Peksag
Christian Muirhead
soareschen
diff --git a/extra_tests/test_decimal.py b/extra_tests/test_decimal.py
--- a/extra_tests/test_decimal.py
+++ b/extra_tests/test_decimal.py
@@ -1,3 +1,6 @@
+import pytest
+from hypothesis import example, settings, given, strategies as st
+
import pickle
import sys
@@ -8,52 +11,112 @@
# import _decimal as C
# import _pydecimal as P
+@pytest.yield_fixture(params=[C, P], ids=['_decimal', '_pydecimal'])
+def module(request):
+ yield request.param
-class TestPythonAPI:
+# Translate symbols.
+CondMap = {
+ C.Clamped: P.Clamped,
+ C.ConversionSyntax: P.ConversionSyntax,
+ C.DivisionByZero: P.DivisionByZero,
+ C.DivisionImpossible: P.InvalidOperation,
+ C.DivisionUndefined: P.DivisionUndefined,
+ C.Inexact: P.Inexact,
+ C.InvalidContext: P.InvalidContext,
+ C.InvalidOperation: P.InvalidOperation,
+ C.Overflow: P.Overflow,
+ C.Rounded: P.Rounded,
+ C.Subnormal: P.Subnormal,
+ C.Underflow: P.Underflow,
+ C.FloatOperation: P.FloatOperation,
+}
- def check_equal(self, val, proto):
- d = C.Decimal(val)
- p = pickle.dumps(d, proto)
- assert d == pickle.loads(p)
+def check_same_flags(flags_C, flags_P):
+ for signal in flags_C:
+ assert flags_C[signal] == flags_P[CondMap[signal]]
- def test_C(self):
+
+def test_C():
+ sys.modules["decimal"] = C
+ import decimal
+ d = decimal.Decimal('1')
+ assert isinstance(d, C.Decimal)
+ assert isinstance(d, decimal.Decimal)
+ assert isinstance(d.as_tuple(), C.DecimalTuple)
+
+ assert d == C.Decimal('1')
+
+def check_round_trip(val, proto):
+ d = C.Decimal(val)
+ p = pickle.dumps(d, proto)
+ assert d == pickle.loads(p)
+
+def test_pickle():
+ v = '-3.123e81723'
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
sys.modules["decimal"] = C
- import decimal
- d = decimal.Decimal('1')
- assert isinstance(d, C.Decimal)
- assert isinstance(d, decimal.Decimal)
- assert isinstance(d.as_tuple(), C.DecimalTuple)
+ check_round_trip('-3.141590000', proto)
+ check_round_trip(v, proto)
- assert d == C.Decimal('1')
+ cd = C.Decimal(v)
+ pd = P.Decimal(v)
+ cdt = cd.as_tuple()
+ pdt = pd.as_tuple()
+ assert cdt.__module__ == pdt.__module__
- def test_pickle(self):
- v = '-3.123e81723'
- for proto in range(pickle.HIGHEST_PROTOCOL + 1):
- sys.modules["decimal"] = C
- self.check_equal('-3.141590000', proto)
- self.check_equal(v, proto)
+ p = pickle.dumps(cdt, proto)
+ r = pickle.loads(p)
+ assert isinstance(r, C.DecimalTuple)
+ assert cdt == r
- cd = C.Decimal(v)
- pd = P.Decimal(v)
- cdt = cd.as_tuple()
- pdt = pd.as_tuple()
- assert cdt.__module__ == pdt.__module__
+ sys.modules["decimal"] = C
+ p = pickle.dumps(cd, proto)
+ sys.modules["decimal"] = P
+ r = pickle.loads(p)
+ assert isinstance(r, P.Decimal)
+ assert r == pd
- p = pickle.dumps(cdt, proto)
- r = pickle.loads(p)
- assert isinstance(r, C.DecimalTuple)
- assert cdt == r
+ sys.modules["decimal"] = C
+ p = pickle.dumps(cdt, proto)
+ sys.modules["decimal"] = P
+ r = pickle.loads(p)
+ assert isinstance(r, P.DecimalTuple)
+ assert r == pdt
- sys.modules["decimal"] = C
- p = pickle.dumps(cd, proto)
- sys.modules["decimal"] = P
- r = pickle.loads(p)
- assert isinstance(r, P.Decimal)
- assert r == pd
+def test_compare_total(module):
+ assert module.Decimal('12').compare_total(module.Decimal('12.0')) == 1
+ assert module.Decimal('4367').compare_total(module.Decimal('NaN')) == -1
- sys.modules["decimal"] = C
- p = pickle.dumps(cdt, proto)
- sys.modules["decimal"] = P
- r = pickle.loads(p)
- assert isinstance(r, P.DecimalTuple)
- assert r == pdt
+def test_compare_total_mag(module):
+ assert module.Decimal(1).compare_total_mag(-2) == -1
+
+def convert_arg(module, arg):
+ if isinstance(arg, module.Decimal):
+ return arg
+ elif type(arg).__name__ == 'Decimal':
+ return module.Decimal(str(arg))
+ else:
+ return arg
+
+from fractions import Fraction
+from decimal import Decimal
+
+ at given(st.decimals(), st.decimals() | st.fractions())
+def test_lt(d1, d2):
+ with C.localcontext(C.ExtendedContext) as ctx_C:
+ d1_C = convert_arg(C, d1)
+ d2_C = convert_arg(C, d2)
+ try:
+ res_C = d1_C < d2_C
+ except Exception as e:
+ res_C = str(type(e))
+ with P.localcontext(P.ExtendedContext) as ctx_P:
+ d1_P = convert_arg(P, d1)
+ d2_P = convert_arg(P, d2)
+ try:
+ res_P = d1_P < d2_P
+ except Exception as e:
+ res_P = str(type(e))
+ assert res_C == res_P
+ check_same_flags(ctx_C.flags, ctx_P.flags)
diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py
--- a/lib-python/2.7/test/test_os.py
+++ b/lib-python/2.7/test/test_os.py
@@ -580,6 +580,7 @@
"getentropy() does not use a file descriptor")
class URandomFDTests(unittest.TestCase):
@unittest.skipUnless(resource, "test requires the resource module")
+ @test_support.impl_detail(pypy=False) # on Linux, may use getrandom()
def test_urandom_failure(self):
# Check urandom() failing when it is not able to open /dev/random.
# We spawn a new process to make the test more robust (if getrlimit()
diff --git a/lib-python/2.7/warnings.py b/lib-python/2.7/warnings.py
--- a/lib-python/2.7/warnings.py
+++ b/lib-python/2.7/warnings.py
@@ -309,9 +309,12 @@
def __init__(self, message, category, filename, lineno, file=None,
line=None):
- local_values = locals()
- for attr in self._WARNING_DETAILS:
- setattr(self, attr, local_values[attr])
+ self.message = message
+ self.category = category
+ self.filename = filename
+ self.lineno = lineno
+ self.file = file
+ self.line = line
self._category_name = category.__name__ if category else None
def __str__(self):
diff --git a/lib-python/3/test/test_descr.py b/lib-python/3/test/test_descr.py
--- a/lib-python/3/test/test_descr.py
+++ b/lib-python/3/test/test_descr.py
@@ -1663,7 +1663,8 @@
self.assertEqual(b.foo, 3)
self.assertEqual(b.__class__, D)
- @unittest.expectedFailure
+ #@unittest.expectedFailure --- on CPython. On PyPy, the test passes
+ @support.impl_detail(cpython=False)
def test_bad_new(self):
self.assertRaises(TypeError, object.__new__)
self.assertRaises(TypeError, object.__new__, '')
diff --git a/lib_pypy/_cffi_ssl/README.md b/lib_pypy/_cffi_ssl/README.md
--- a/lib_pypy/_cffi_ssl/README.md
+++ b/lib_pypy/_cffi_ssl/README.md
@@ -5,8 +5,15 @@
it renames the compiled shared object to _pypy_openssl.so (which means
that cryptography can ship their own cffi backend)
-NOTE: currently, we have changed ``_cffi_src/openssl/callbacks.py`` to
-not rely on the CPython C API.
+NOTE: currently, we have the following changes:
+
+* ``_cffi_src/openssl/callbacks.py`` to not rely on the CPython C API
+ (this change is now backported)
+
+* ``_cffi_src/utils.py`` for issue #2575 (29c9a89359e4)
+
+* ``_cffi_src/openssl/x509_vfy.py`` for issue #2605 (ca4d0c90f5a1)
+
# Tests?
diff --git a/lib_pypy/_cffi_ssl/_cffi_src/.build_openssl.py.swn b/lib_pypy/_cffi_ssl/_cffi_src/.build_openssl.py.swn
deleted file mode 100644
index 180c02ff82d3363f34a334aae22c9876d4c96481..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
GIT binary patch
[cut]
diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py
--- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py
+++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py
@@ -221,10 +221,16 @@
static const long X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM = 0;
static const long X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED = 0;
static const long X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 = 0;
+#ifndef X509_V_ERR_HOSTNAME_MISMATCH
static const long X509_V_ERR_HOSTNAME_MISMATCH = 0;
+#endif
+#ifndef X509_V_ERR_EMAIL_MISMATCH
static const long X509_V_ERR_EMAIL_MISMATCH = 0;
+#endif
+#ifndef X509_V_ERR_IP_ADDRESS_MISMATCH
static const long X509_V_ERR_IP_ADDRESS_MISMATCH = 0;
#endif
+#endif
/* OpenSSL 1.0.2beta2+ verification parameters */
#if CRYPTOGRAPHY_OPENSSL_102BETA2_OR_GREATER && \
diff --git a/lib_pypy/_cffi_ssl/_cffi_src/utils.py b/lib_pypy/_cffi_ssl/_cffi_src/utils.py
--- a/lib_pypy/_cffi_ssl/_cffi_src/utils.py
+++ b/lib_pypy/_cffi_ssl/_cffi_src/utils.py
@@ -47,9 +47,19 @@
# is legal, but the following will fail to compile:
# int foo(int);
# int foo(short);
+ #
+ # XXX <arigo> No, it is a bad idea. OpenSSL itself tends to tweak
+ # the definitions, like adding a 'const' (see issue #2575). Every
+ # time they do so, it makes a gratuitous break in this code. It is
+ # better to rely on the C compiler for that, which is a little bit
+ # more flexible. That's the point of set_source(). We can still
+ # re-enable the line ``#functions +`` below to get the original
+ # behavior. (I would enable it during tests, but I don't find any
+ # custom test at all..??)
+ #
verify_source = "\n".join(
includes +
- functions +
+ #functions +
customizations
)
ffi = build_ffi(
diff --git a/lib_pypy/_cffi_ssl/_stdssl/.__init__.py.swn b/lib_pypy/_cffi_ssl/_stdssl/.__init__.py.swn
deleted file mode 100644
index 40344f6cee5cb001b73dd3a9a203015568831391..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
GIT binary patch
[cut]
diff --git a/lib_pypy/_cffi_ssl/_stdssl/error.py b/lib_pypy/_cffi_ssl/_stdssl/error.py
--- a/lib_pypy/_cffi_ssl/_stdssl/error.py
+++ b/lib_pypy/_cffi_ssl/_stdssl/error.py
@@ -1,4 +1,5 @@
import sys
+import os
import traceback
from _pypy_openssl import ffi
from _pypy_openssl import lib
@@ -100,18 +101,17 @@
errval = SSL_ERROR_WANT_CONNECT
elif err == SSL_ERROR_SYSCALL:
if e == 0:
- if ret == 0 or obj.socket is not None:
+ if ret == 0 or obj.socket is None:
errtype = SSLEOFError
errstr = "EOF occurred in violation of protocol"
errval = SSL_ERROR_EOF
elif ret == -1 and obj.socket is not None:
# the underlying BIO reported an I/O error
lib.ERR_clear_error()
- s = obj.get_socket_or_None()
- s.errorhandler()
- assert 0, "must not get here"
- #errno = ffi.errno
- #return IOError(errno)
+ # s = obj.get_socket_or_None()
+ # XXX: Windows?
+ errno = ffi.errno
+ return OSError(errno, os.strerror(errno))
else:
errtype = SSLSyscallError
errstr = "Some I/O error occurred"
diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py
--- a/lib_pypy/_ctypes/array.py
+++ b/lib_pypy/_ctypes/array.py
@@ -74,12 +74,16 @@
return self._type_._alignmentofinstances()
def _CData_output(self, resarray, base=None, index=-1):
- # this seems to be a string if we're array of char, surprise!
- from ctypes import c_char, c_wchar
- if self._type_ is c_char:
- return _rawffi.charp2string(resarray.buffer, self._length_)
- if self._type_ is c_wchar:
- return _rawffi.wcharp2unicode(resarray.buffer, self._length_)
+ from _rawffi.alt import types
+ # If a char_p or unichar_p is received, skip the string interpretation
+ if base._ffiargtype != types.Pointer(types.char_p) and \
+ base._ffiargtype != types.Pointer(types.unichar_p):
+ # this seems to be a string if we're array of char, surprise!
+ from ctypes import c_char, c_wchar
+ if self._type_ is c_char:
+ return _rawffi.charp2string(resarray.buffer, self._length_)
+ if self._type_ is c_wchar:
+ return _rawffi.wcharp2unicode(resarray.buffer, self._length_)
res = self.__new__(self)
ffiarray = self._ffiarray.fromaddress(resarray.buffer, self._length_)
res._buffer = ffiarray
diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py
--- a/lib_pypy/_curses.py
+++ b/lib_pypy/_curses.py
@@ -8,6 +8,9 @@
from _curses_cffi import ffi, lib
+version = b"2.2"
+__version__ = b"2.2"
+
def _copy_to_globals(name):
globals()[name] = getattr(lib, name)
@@ -60,10 +63,6 @@
_setup()
-# Do we want this?
-# version = "2.2"
-# __version__ = "2.2"
-
# ____________________________________________________________
@@ -404,6 +403,17 @@
raise error("getch requires 0 or 2 arguments")
return val
+ def get_wch(self, *args):
+ wch = ffi.new("int[1]")
+ if len(args) == 0:
+ val = lib.wget_wch(self._win, wch)
+ elif len(args) == 2:
+ val = lib.mvwget_wch(self._win, *args, wch)
+ else:
+ raise error("get_wch requires 0 or 2 arguments")
+        _check_ERR(val, "get_wch")
+ return wch[0]
+
def getkey(self, *args):
if len(args) == 0:
val = lib.wgetch(self._win)
@@ -919,101 +929,29 @@
return None
-# XXX: Do something about the following?
-# /* Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES
-# * and _curses.COLS */
-# #if defined(HAVE_CURSES_RESIZETERM) || defined(HAVE_CURSES_RESIZE_TERM)
-# static int
-# update_lines_cols(void)
-# {
-# PyObject *o;
-# PyObject *m = PyImport_ImportModuleNoBlock("curses");
+# Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES
+# and _curses.COLS
+def update_lines_cols():
+ globals()["LINES"] = lib.LINES
+ globals()["COLS"] = lib.COLS
+ try:
+ m = sys.modules["curses"]
+ m.LINES = lib.LINES
+ m.COLS = lib.COLS
+ except (KeyError, AttributeError):
+ pass
-# if (!m)
-# return 0;
-# o = PyInt_FromLong(LINES);
-# if (!o) {
-# Py_DECREF(m);
-# return 0;
-# }
-# if (PyObject_SetAttrString(m, "LINES", o)) {
-# Py_DECREF(m);
-# Py_DECREF(o);
-# return 0;
-# }
-# if (PyDict_SetItemString(ModDict, "LINES", o)) {
-# Py_DECREF(m);
-# Py_DECREF(o);
-# return 0;
-# }
-# Py_DECREF(o);
-# o = PyInt_FromLong(COLS);
-# if (!o) {
-# Py_DECREF(m);
-# return 0;
-# }
-# if (PyObject_SetAttrString(m, "COLS", o)) {
-# Py_DECREF(m);
-# Py_DECREF(o);
-# return 0;
-# }
-# if (PyDict_SetItemString(ModDict, "COLS", o)) {
-# Py_DECREF(m);
-# Py_DECREF(o);
-# return 0;
-# }
-# Py_DECREF(o);
-# Py_DECREF(m);
-# return 1;
-# }
-# #endif
+def resizeterm(lines, columns):
+ _ensure_initialised()
+ _check_ERR(lib.resizeterm(lines, columns), "resizeterm")
+ update_lines_cols()
-# #ifdef HAVE_CURSES_RESIZETERM
-# static PyObject *
-# PyCurses_ResizeTerm(PyObject *self, PyObject *args)
-# {
-# int lines;
-# int columns;
-# PyObject *result;
-# PyCursesInitialised;
-
-# if (!PyArg_ParseTuple(args,"ii:resizeterm", &lines, &columns))
-# return NULL;
-
-# result = PyCursesCheckERR(resizeterm(lines, columns), "resizeterm");
-# if (!result)
-# return NULL;
-# if (!update_lines_cols())
-# return NULL;
-# return result;
-# }
-
-# #endif
-
-# #ifdef HAVE_CURSES_RESIZE_TERM
-# static PyObject *
-# PyCurses_Resize_Term(PyObject *self, PyObject *args)
-# {
-# int lines;
-# int columns;
-
-# PyObject *result;
-
-# PyCursesInitialised;
-
-# if (!PyArg_ParseTuple(args,"ii:resize_term", &lines, &columns))
-# return NULL;
-
-# result = PyCursesCheckERR(resize_term(lines, columns), "resize_term");
-# if (!result)
-# return NULL;
-# if (!update_lines_cols())
-# return NULL;
-# return result;
-# }
-# #endif /* HAVE_CURSES_RESIZE_TERM */
+def resize_term(lines, columns):
+ _ensure_initialised()
+ _check_ERR(lib.resize_term(lines, columns), "resize_term")
+ update_lines_cols()
def setsyx(y, x):
@@ -1078,6 +1016,11 @@
return _check_ERR(lib.ungetch(_chtype(ch)), "ungetch")
+def unget_wch(ch):
+ _ensure_initialised()
+ return _check_ERR(lib.unget_wch(_chtype(ch)), "unget_wch")
+
+
def use_env(flag):
lib.use_env(flag)
return None
diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py
--- a/lib_pypy/_curses_build.py
+++ b/lib_pypy/_curses_build.py
@@ -1,3 +1,4 @@
+import os
from cffi import FFI, VerificationError
@@ -17,6 +18,11 @@
# error message
raise e_last
+def find_curses_include_dirs():
+ if os.path.exists('/usr/include/ncursesw'):
+ return ['/usr/include/ncursesw']
+ return []
+
ffi = FFI()
@@ -59,7 +65,8 @@
void _m_getsyx(int *yx) {
getsyx(yx[0], yx[1]);
}
-""", libraries=[find_curses_library(), 'panel'])
+""", libraries=[find_curses_library(), 'panel'],
+ include_dirs=find_curses_include_dirs())
ffi.cdef("""
@@ -70,6 +77,8 @@
typedef unsigned long... chtype;
typedef chtype attr_t;
+typedef int... wint_t;
+
typedef struct
{
short id; /* ID to distinguish multiple devices */
@@ -105,6 +114,13 @@
static const chtype A_CHARTEXT;
static const chtype A_COLOR;
+static const chtype A_HORIZONTAL;
+static const chtype A_LEFT;
+static const chtype A_LOW;
+static const chtype A_RIGHT;
+static const chtype A_TOP;
+static const chtype A_VERTICAL;
+
static const int BUTTON1_RELEASED;
static const int BUTTON1_PRESSED;
static const int BUTTON1_CLICKED;
@@ -160,6 +176,8 @@
void filter(void);
int flash(void);
int flushinp(void);
+int wget_wch(WINDOW *, wint_t *);
+int mvwget_wch(WINDOW *, int, int, wint_t *);
chtype getbkgd(WINDOW *);
WINDOW * getwin(FILE *);
int halfdelay(int);
@@ -220,6 +238,8 @@
int resetty(void);
int reset_prog_mode(void);
int reset_shell_mode(void);
+int resizeterm(int, int);
+int resize_term(int, int);
int savetty(void);
int scroll(WINDOW *);
int scrollok(WINDOW *, bool);
@@ -233,6 +253,7 @@
int touchwin(WINDOW *);
int typeahead(int);
int ungetch(int);
+int unget_wch(const wchar_t);
int untouchwin(WINDOW *);
void use_env(bool);
int waddch(WINDOW *, const chtype);
diff --git a/lib_pypy/_decimal.py b/lib_pypy/_decimal.py
--- a/lib_pypy/_decimal.py
+++ b/lib_pypy/_decimal.py
@@ -489,13 +489,16 @@
vv.exp = 0
multiplied = Decimal._new_empty()
denom = Decimal(other.denominator)
- with _CatchStatus(context) as (ctx, status_ptr):
- _mpdec.mpd_qmul(multiplied._mpd, vv, denom._mpd,
- ctx, status_ptr)
- multiplied._mpd.exp += exp # XXX probably a bug
- # in _decimal.c
+ maxctx = _ffi.new("struct mpd_context_t*")
+ _mpdec.mpd_maxcontext(maxctx)
+ status_ptr = _ffi.new("uint32_t*")
+ _mpdec.mpd_qmul(multiplied._mpd, vv, denom._mpd,
+ maxctx, status_ptr)
+ multiplied._mpd.exp = exp
finally:
_mpdec.mpd_del(vv)
+ if status_ptr[0] != 0:
+ raise ValueError("exact conversion for comparison failed")
return multiplied, numerator
else:
@@ -719,8 +722,8 @@
compare = _make_binary_operation('compare')
compare_signal = _make_binary_operation('compare_signal')
- compare_total = _make_binary_operation('compare')
- compare_total_mag = _make_binary_operation('compare')
+ compare_total = _make_binary_operation('compare_total')
+ compare_total_mag = _make_binary_operation('compare_total_mag')
logical_and = _make_binary_operation('logical_and')
logical_or = _make_binary_operation('logical_or')
logical_xor = _make_binary_operation('logical_xor')
diff --git a/lib_pypy/_lzma.py b/lib_pypy/_lzma.py
--- a/lib_pypy/_lzma.py
+++ b/lib_pypy/_lzma.py
@@ -10,6 +10,7 @@
import weakref
import sys
import io
+import __pypy__
from _lzma_cffi import ffi, lib as m
@@ -63,6 +64,10 @@
m._pylzma_stream_init(ret)
return ffi.gc(ret, m.lzma_end)
+def _release_lzma_stream(st):
+ ffi.gc(st, None)
+ m.lzma_end(st)
+
def add_constant(c):
globals()[c] = getattr(m, 'LZMA_' + c)
@@ -148,39 +153,39 @@
def parse_filter_spec_lzma(id, preset=m.LZMA_PRESET_DEFAULT, **kwargs):
ret = ffi.new('lzma_options_lzma*')
if m.lzma_lzma_preset(ret, preset):
- raise LZMAError("Invalid...")
+ raise LZMAError("Invalid compression preset: %s" % preset)
for arg, val in kwargs.items():
if arg in ('dict_size', 'lc', 'lp', 'pb', 'nice_len', 'depth'):
setattr(ret, arg, val)
elif arg in ('mf', 'mode'):
setattr(ret, arg, int(val))
else:
- raise ValueError("Invalid...")
+ raise ValueError("Invalid filter specifier for LZMA filter")
return ret
def parse_filter_spec(spec):
if not isinstance(spec, collections.Mapping):
- raise TypeError("Filter...")
+ raise TypeError("Filter specifier must be a dict or dict-like object")
ret = ffi.new('lzma_filter*')
try:
ret.id = spec['id']
except KeyError:
- raise ValueError("Filter...")
+ raise ValueError("Filter specifier must have an \"id\" entry")
if ret.id in (m.LZMA_FILTER_LZMA1, m.LZMA_FILTER_LZMA2):
try:
options = parse_filter_spec_lzma(**spec)
except TypeError:
- raise ValueError("Invalid...")
+ raise ValueError("Invalid filter specifier for LZMA filter")
elif ret.id == m.LZMA_FILTER_DELTA:
try:
options = parse_filter_spec_delta(**spec)
except TypeError:
- raise ValueError("Invalid...")
+ raise ValueError("Invalid filter specifier for delta filter")
elif ret.id in BCJ_FILTERS:
try:
options = parse_filter_spec_bcj(**spec)
except TypeError:
- raise ValueError("Invalid...")
+ raise ValueError("Invalid filter specifier for BCJ filter")
else:
raise ValueError("Invalid %d" % (ret.id,))
@@ -204,7 +209,9 @@
def parse_filter_chain_spec(filterspecs):
if len(filterspecs) > m.LZMA_FILTERS_MAX:
- raise ValueError("Too...")
+ raise ValueError(
+ "Too many filters - liblzma supports a maximum of %s" %
+ m.LZMA_FILTERS_MAX)
filters = ffi.new('lzma_filter[]', m.LZMA_FILTERS_MAX+1)
_owns[filters] = children = []
for i in range(m.LZMA_FILTERS_MAX+1):
@@ -236,7 +243,7 @@
elif filter.id in BCJ_FILTERS:
add_opts('lzma_options_bcj', 'start_offset')
else:
- raise ValueError("Invalid...")
+ raise ValueError("Invalid filter ID: %s" % filter.id)
return spec
def _decode_filter_properties(filter_id, encoded_props):
@@ -420,25 +427,26 @@
For one-shot decompression, use the decompress() function instead.
"""
- def __init__(self, format=FORMAT_AUTO, memlimit=None, filters=None, header=None, check=None, unpadded_size=None):
+ def __init__(self, format=FORMAT_AUTO, memlimit=None, filters=None,
+ header=None, check=None, unpadded_size=None):
decoder_flags = m.LZMA_TELL_ANY_CHECK | m.LZMA_TELL_NO_CHECK
- #decoder_flags = 0
if memlimit is not None:
if format == FORMAT_RAW:
- raise ValueError("Cannot sp...")
- #memlimit = long(memlimit)
+ raise ValueError("Cannot specify memory limit with FORMAT_RAW")
else:
memlimit = m.UINT64_MAX
if format == FORMAT_RAW and filters is None:
- raise ValueError("Must...")
+ raise ValueError("Must specify filters for FORMAT_RAW")
elif format != FORMAT_RAW and filters is not None:
- raise ValueError("Cannot...")
+ raise ValueError("Cannot specify filters except with FORMAT_RAW")
if format == FORMAT_BLOCK and (header is None or unpadded_size is None or check is None):
- raise ValueError("Must...")
+ raise ValueError("Must specify header, unpadded_size and check "
+ "with FORMAT_BLOCK")
elif format != FORMAT_BLOCK and (header is not None or unpadded_size is not None or check is not None):
- raise ValueError("Cannot...")
+ raise ValueError("Cannot specify header, unpadded_size or check "
+ "except with FORMAT_BLOCK")
format = _parse_format(format)
self.lock = threading.Lock()
@@ -476,7 +484,7 @@
self.expected_size = block.compressed_size
catch_lzma_error(m.lzma_block_decoder, self.lzs, block)
else:
- raise ValueError("invalid...")
+ raise ValueError("invalid container format: %s" % format)
def pre_decompress_left_data(self, buf, buf_size):
# in this case there is data left that needs to be processed before the first
@@ -551,7 +559,7 @@
raise TypeError("max_length parameter object cannot be interpreted as an integer")
with self.lock:
if self.eof:
- raise EOFError("Already...")
+ raise EOFError("Already at end of stream")
lzs = self.lzs
data = to_bytes(data)
buf = ffi.new('uint8_t[]', data)
@@ -648,6 +656,16 @@
raise TypeError("cannot serialize '%s' object" %
self.__class__.__name__)
+
+# Issue #2579: Setting up the stream for encoding takes around 17MB of
+# RAM on my Linux 64 system. So we call add_memory_pressure(17MB) when
+# we create the stream. In flush(), we actively free the stream even
+# though we could just leave it to the GC (but 17MB is too much for
+# doing that sanely); at this point we call add_memory_pressure(-17MB)
+# to cancel the original increase.
+COMPRESSION_STREAM_SIZE = 1024*1024*17
+
+
class LZMACompressor(object):
"""
LZMACompressor(format=FORMAT_XZ, check=-1, preset=None, filters=None)
@@ -679,15 +697,16 @@
"""
def __init__(self, format=FORMAT_XZ, check=-1, preset=None, filters=None):
if format != FORMAT_XZ and check not in (-1, m.LZMA_CHECK_NONE):
- raise ValueError("Integrity...")
+ raise ValueError("Integrity checks are only supported by FORMAT_XZ")
if preset is not None and filters is not None:
- raise ValueError("Cannot...")
+ raise ValueError("Cannot specify both preset and filter chain")
if preset is None:
preset = m.LZMA_PRESET_DEFAULT
format = _parse_format(format)
self.lock = threading.Lock()
self.flushed = 0
self.lzs = _new_lzma_stream()
+ __pypy__.add_memory_pressure(COMPRESSION_STREAM_SIZE)
if format == FORMAT_XZ:
if filters is None:
if check == -1:
@@ -702,19 +721,19 @@
if filters is None:
options = ffi.new('lzma_options_lzma*')
if m.lzma_lzma_preset(options, preset):
- raise LZMAError("Invalid...")
+ raise LZMAError("Invalid compression preset: %s" % preset)
catch_lzma_error(m.lzma_alone_encoder, self.lzs,
options)
else:
raise NotImplementedError
elif format == FORMAT_RAW:
if filters is None:
- raise ValueError("Must...")
+ raise ValueError("Must specify filters for FORMAT_RAW")
filters = parse_filter_chain_spec(filters)
catch_lzma_error(m.lzma_raw_encoder, self.lzs,
filters)
else:
- raise ValueError("Invalid...")
+ raise ValueError("invalid container format: %s" % format)
def compress(self, data):
"""
@@ -728,7 +747,7 @@
"""
with self.lock:
if self.flushed:
- raise ValueError("Compressor...")
+ raise ValueError("Compressor has been flushed")
return self._compress(data)
def _compress(self, data, action=m.LZMA_RUN):
@@ -769,9 +788,12 @@
def flush(self):
with self.lock:
if self.flushed:
- raise ValueError("Repeated...")
+ raise ValueError("Repeated call to flush()")
self.flushed = 1
- return self._compress(b'', action=m.LZMA_FINISH)
+ result = self._compress(b'', action=m.LZMA_FINISH)
+ __pypy__.add_memory_pressure(-COMPRESSION_STREAM_SIZE)
+ _release_lzma_stream(self.lzs)
+ return result
def __getstate__(self):
raise TypeError("cannot serialize '%s' object" %
diff --git a/lib_pypy/cffi/_cffi_errors.h b/lib_pypy/cffi/_cffi_errors.h
new file mode 100644
--- /dev/null
+++ b/lib_pypy/cffi/_cffi_errors.h
@@ -0,0 +1,145 @@
+#ifndef CFFI_MESSAGEBOX
+# ifdef _MSC_VER
+# define CFFI_MESSAGEBOX 1
+# else
+# define CFFI_MESSAGEBOX 0
+# endif
+#endif
+
+
+#if CFFI_MESSAGEBOX
+/* Windows only: logic to take the Python-CFFI embedding logic
+ initialization errors and display them in a background thread
+ with MessageBox. The idea is that if the whole program closes
+ as a result of this problem, then likely it is already a console
+ program and you can read the stderr output in the console too.
+ If it is not a console program, then it will likely show its own
+ dialog to complain, or generally not abruptly close, and for this
+ case the background thread should stay alive.
+*/
+static void *volatile _cffi_bootstrap_text;
+
+static PyObject *_cffi_start_error_capture(void)
+{
+ PyObject *result = NULL;
+ PyObject *x, *m, *bi;
+
+ if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text,
+ (void *)1, NULL) != NULL)
+ return (PyObject *)1;
+
+ m = PyImport_AddModule("_cffi_error_capture");
+ if (m == NULL)
+ goto error;
+
+ result = PyModule_GetDict(m);
+ if (result == NULL)
+ goto error;
+
+#if PY_MAJOR_VERSION >= 3
+ bi = PyImport_ImportModule("builtins");
+#else
+ bi = PyImport_ImportModule("__builtin__");
+#endif
+ if (bi == NULL)
+ goto error;
+ PyDict_SetItemString(result, "__builtins__", bi);
+ Py_DECREF(bi);
+
+ x = PyRun_String(
+ "import sys\n"
+ "class FileLike:\n"
+ " def write(self, x):\n"
+ " of.write(x)\n"
+ " self.buf += x\n"
+ "fl = FileLike()\n"
+ "fl.buf = ''\n"
+ "of = sys.stderr\n"
+ "sys.stderr = fl\n"
+ "def done():\n"
+ " sys.stderr = of\n"
+ " return fl.buf\n", /* make sure the returned value stays alive */
+ Py_file_input,
+ result, result);
+ Py_XDECREF(x);
+
+ error:
+ if (PyErr_Occurred())
+ {
+ PyErr_WriteUnraisable(Py_None);
+ PyErr_Clear();
+ }
+ return result;
+}
+
+#pragma comment(lib, "user32.lib")
+
+static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored)
+{
+ Sleep(666); /* may be interrupted if the whole process is closing */
+#if PY_MAJOR_VERSION >= 3
+ MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text,
+ L"Python-CFFI error",
+ MB_OK | MB_ICONERROR);
+#else
+ MessageBoxA(NULL, (char *)_cffi_bootstrap_text,
+ "Python-CFFI error",
+ MB_OK | MB_ICONERROR);
+#endif
+ _cffi_bootstrap_text = NULL;
+ return 0;
+}
+
+static void _cffi_stop_error_capture(PyObject *ecap)
+{
+ PyObject *s;
+ void *text;
+
+ if (ecap == (PyObject *)1)
+ return;
+
+ if (ecap == NULL)
+ goto error;
+
+ s = PyRun_String("done()", Py_eval_input, ecap, ecap);
+ if (s == NULL)
+ goto error;
+
+ /* Show a dialog box, but in a background thread, and
+ never show multiple dialog boxes at once. */
+#if PY_MAJOR_VERSION >= 3
+ text = PyUnicode_AsWideCharString(s, NULL);
+#else
+ text = PyString_AsString(s);
+#endif
+
+ _cffi_bootstrap_text = text;
+
+ if (text != NULL)
+ {
+ HANDLE h;
+ h = CreateThread(NULL, 0, _cffi_bootstrap_dialog,
+ NULL, 0, NULL);
+ if (h != NULL)
+ CloseHandle(h);
+ }
+ /* decref the string, but it should stay alive as 'fl.buf'
+ in the small module above. It will really be freed only if
+ we later get another similar error. So it's a leak of at
+ most one copy of the small module. That's fine for this
+ situation which is usually a "fatal error" anyway. */
+ Py_DECREF(s);
+ PyErr_Clear();
+ return;
+
+ error:
+ _cffi_bootstrap_text = NULL;
+ PyErr_Clear();
+}
+
+#else
+
+static PyObject *_cffi_start_error_capture(void) { return NULL; }
+static void _cffi_stop_error_capture(PyObject *ecap) { }
+
+#endif
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -159,9 +159,9 @@
#define _cffi_from_c_struct \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
- ((wchar_t(*)(PyObject *))_cffi_exports[19])
+ ((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
- ((PyObject *(*)(wchar_t))_cffi_exports[20])
+ ((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
@@ -174,7 +174,11 @@
#define _CFFI_CPIDX 25
#define _cffi_call_python \
((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX])
-#define _CFFI_NUM_EXPORTS 26
+#define _cffi_to_c_wchar3216_t \
+ ((int(*)(PyObject *))_cffi_exports[26])
+#define _cffi_from_c_wchar3216_t \
+ ((PyObject *(*)(int))_cffi_exports[27])
+#define _CFFI_NUM_EXPORTS 28
struct _cffi_ctypedescr;
@@ -215,6 +219,46 @@
return NULL;
}
+
+#ifdef HAVE_WCHAR_H
+typedef wchar_t _cffi_wchar_t;
+#else
+typedef uint16_t _cffi_wchar_t; /* same random pick as _cffi_backend.c */
+#endif
+
+_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o)
+{
+ if (sizeof(_cffi_wchar_t) == 2)
+ return (uint16_t)_cffi_to_c_wchar_t(o);
+ else
+ return (uint16_t)_cffi_to_c_wchar3216_t(o);
+}
+
+_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x)
+{
+ if (sizeof(_cffi_wchar_t) == 2)
+ return _cffi_from_c_wchar_t(x);
+ else
+ return _cffi_from_c_wchar3216_t(x);
+}
+
+_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o)
+{
+ if (sizeof(_cffi_wchar_t) == 4)
+ return (int)_cffi_to_c_wchar_t(o);
+ else
+ return (int)_cffi_to_c_wchar3216_t(o);
+}
+
+_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(int x)
+{
+ if (sizeof(_cffi_wchar_t) == 4)
+ return _cffi_from_c_wchar_t(x);
+ else
+ return _cffi_from_c_wchar3216_t(x);
+}
+
+
/********** end CPython-specific section **********/
#else
_CFFI_UNUSED_FN
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -109,6 +109,8 @@
/********** CPython-specific section **********/
#ifndef PYPY_VERSION
+#include "_cffi_errors.h"
+
#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX]
@@ -220,8 +222,16 @@
/* Print as much information as potentially useful.
Debugging load-time failures with embedding is not fun
*/
+ PyObject *ecap;
PyObject *exception, *v, *tb, *f, *modules, *mod;
PyErr_Fetch(&exception, &v, &tb);
+ ecap = _cffi_start_error_capture();
+ f = PySys_GetObject((char *)"stderr");
+ if (f != NULL && f != Py_None) {
+ PyFile_WriteString(
+ "Failed to initialize the Python-CFFI embedding logic:\n\n", f);
+ }
+
if (exception != NULL) {
PyErr_NormalizeException(&exception, &v, &tb);
PyErr_Display(exception, v, tb);
@@ -230,7 +240,6 @@
Py_XDECREF(v);
Py_XDECREF(tb);
- f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
"\ncompiled with cffi version: 1.11.0"
@@ -249,6 +258,7 @@
PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0);
PyFile_WriteString("\n\n", f);
}
+ _cffi_stop_error_capture(ecap);
}
result = -1;
goto done;
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -75,9 +75,10 @@
self._init_once_cache = {}
self._cdef_version = None
self._embedding = None
+ self._typecache = model.get_typecache(backend)
if hasattr(backend, 'set_ffi'):
backend.set_ffi(self)
- for name in backend.__dict__:
+ for name in list(backend.__dict__):
if name.startswith('RTLD_'):
setattr(self, name, getattr(backend, name))
#
@@ -764,7 +765,7 @@
if sys.platform != "win32":
return backend.load_library(None, flags)
name = "c" # Windows: load_library(None) fails, but this works
- # (backward compatibility hack only)
+ # on Python 2 (backward compatibility hack only)
first_error = None
if '.' in name or '/' in name or os.sep in name:
try:
@@ -774,6 +775,9 @@
import ctypes.util
path = ctypes.util.find_library(name)
if path is None:
+ if name == "c" and sys.platform == "win32" and sys.version_info >= (3,):
+ raise OSError("dlopen(None) cannot work on Windows for Python 3 "
+ "(see http://bugs.python.org/issue23606)")
msg = ("ctypes.util.find_library() did not manage "
"to locate a library called %r" % (name,))
if first_error is not None:
diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py
--- a/lib_pypy/cffi/cffi_opcode.py
+++ b/lib_pypy/cffi/cffi_opcode.py
@@ -107,9 +107,10 @@
PRIM_UINTMAX = 47
PRIM_FLOATCOMPLEX = 48
PRIM_DOUBLECOMPLEX = 49
+PRIM_CHAR16 = 50
+PRIM_CHAR32 = 51
-
-_NUM_PRIM = 50
+_NUM_PRIM = 52
_UNKNOWN_PRIM = -1
_UNKNOWN_FLOAT_PRIM = -2
_UNKNOWN_LONG_DOUBLE = -3
@@ -135,6 +136,8 @@
'double _Complex': PRIM_DOUBLECOMPLEX,
'_Bool': PRIM_BOOL,
'wchar_t': PRIM_WCHAR,
+ 'char16_t': PRIM_CHAR16,
+ 'char32_t': PRIM_CHAR32,
'int8_t': PRIM_INT8,
'uint8_t': PRIM_UINT8,
'int16_t': PRIM_INT16,
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -122,6 +122,8 @@
'_Bool': 'i',
# the following types are not primitive in the C sense
'wchar_t': 'c',
+ 'char16_t': 'c',
+ 'char32_t': 'c',
'int8_t': 'i',
'uint8_t': 'i',
'int16_t': 'i',
@@ -566,22 +568,26 @@
global_lock = allocate_lock()
+_typecache_cffi_backend = weakref.WeakValueDictionary()
+
+def get_typecache(backend):
+ # returns _typecache_cffi_backend if backend is the _cffi_backend
+ # module, or type(backend).__typecache if backend is an instance of
+ # CTypesBackend (or some FakeBackend class during tests)
+ if isinstance(backend, types.ModuleType):
+ return _typecache_cffi_backend
+ with global_lock:
+ if not hasattr(type(backend), '__typecache'):
+ type(backend).__typecache = weakref.WeakValueDictionary()
+ return type(backend).__typecache
def global_cache(srctype, ffi, funcname, *args, **kwds):
key = kwds.pop('key', (funcname, args))
assert not kwds
try:
- return ffi._backend.__typecache[key]
+ return ffi._typecache[key]
except KeyError:
pass
- except AttributeError:
- # initialize the __typecache attribute, either at the module level
- # if ffi._backend is a module, or at the class level if ffi._backend
- # is some instance.
- if isinstance(ffi._backend, types.ModuleType):
- ffi._backend.__typecache = weakref.WeakValueDictionary()
- else:
- type(ffi._backend).__typecache = weakref.WeakValueDictionary()
try:
res = getattr(ffi._backend, funcname)(*args)
except NotImplementedError as e:
@@ -589,7 +595,7 @@
# note that setdefault() on WeakValueDictionary is not atomic
# and contains a rare bug (http://bugs.python.org/issue19542);
# we have to use a lock and do it ourselves
- cache = ffi._backend.__typecache
+ cache = ffi._typecache
with global_lock:
res1 = cache.get(key)
if res1 is None:
diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h
--- a/lib_pypy/cffi/parse_c_type.h
+++ b/lib_pypy/cffi/parse_c_type.h
@@ -81,8 +81,10 @@
#define _CFFI_PRIM_UINTMAX 47
#define _CFFI_PRIM_FLOATCOMPLEX 48
#define _CFFI_PRIM_DOUBLECOMPLEX 49
+#define _CFFI_PRIM_CHAR16 50
+#define _CFFI_PRIM_CHAR32 51
-#define _CFFI__NUM_PRIM 50
+#define _CFFI__NUM_PRIM 52
#define _CFFI__UNKNOWN_PRIM (-1)
#define _CFFI__UNKNOWN_FLOAT_PRIM (-2)
#define _CFFI__UNKNOWN_LONG_DOUBLE (-3)
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -3,8 +3,9 @@
from .error import VerificationError
from .cffi_opcode import *
-VERSION = "0x2601"
-VERSION_EMBEDDED = "0x2701"
+VERSION_BASE = 0x2601
+VERSION_EMBEDDED = 0x2701
+VERSION_CHAR16CHAR32 = 0x2801
class GlobalExpr:
@@ -126,6 +127,10 @@
self.ffi = ffi
self.module_name = module_name
self.target_is_python = target_is_python
+ self._version = VERSION_BASE
+
+ def needs_version(self, ver):
+ self._version = max(self._version, ver)
def collect_type_table(self):
self._typesdict = {}
@@ -303,10 +308,10 @@
base_module_name,))
prnt('#endif')
lines = self._rel_readlines('_embedding.h')
+ i = lines.index('#include "_cffi_errors.h"\n')
+ lines[i:i+1] = self._rel_readlines('_cffi_errors.h')
prnt(''.join(lines))
- version = VERSION_EMBEDDED
- else:
- version = VERSION
+ self.needs_version(VERSION_EMBEDDED)
#
# then paste the C source given by the user, verbatim.
prnt('/************************************************************/')
@@ -405,7 +410,7 @@
prnt(' _cffi_call_python_org = '
'(void(*)(struct _cffi_externpy_s *, char *))p[1];')
prnt(' }')
- prnt(' p[0] = (const void *)%s;' % version)
+ prnt(' p[0] = (const void *)0x%x;' % self._version)
prnt(' p[1] = &_cffi_type_context;')
prnt('}')
# on Windows, distutils insists on putting init_cffi_xyz in
@@ -423,21 +428,22 @@
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % (base_module_name,))
prnt('{')
- prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % (
- self.module_name, version))
+ prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % (
+ self.module_name, self._version))
prnt('}')
prnt('#else')
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % (base_module_name,))
prnt('{')
- prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % (
- self.module_name, version))
+ prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % (
+ self.module_name, self._version))
prnt('}')
prnt('#endif')
prnt()
prnt('#ifdef __GNUC__')
prnt('# pragma GCC visibility pop')
prnt('#endif')
+ self._version = None
def _to_py(self, x):
if isinstance(x, str):
@@ -476,7 +482,8 @@
prnt('from %s import ffi as _ffi%d' % (included_module_name, i))
prnt()
prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,))
- prnt(" _version = %s," % (VERSION,))
+ prnt(" _version = 0x%x," % (self._version,))
+ self._version = None
#
# the '_types' keyword argument
self.cffi_types = tuple(self.cffi_types) # don't change any more
@@ -515,8 +522,11 @@
# double' here, and _cffi_to_c_double would loose precision
converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),)
else:
- converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
+ cname = tp.get_c_name('')
+ converter = '(%s)_cffi_to_c_%s' % (cname,
tp.name.replace(' ', '_'))
+ if cname in ('char16_t', 'char32_t'):
+ self.needs_version(VERSION_CHAR16CHAR32)
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
@@ -573,7 +583,10 @@
elif isinstance(tp, model.UnknownFloatType):
return '_cffi_from_c_double(%s)' % (var,)
elif tp.name != 'long double' and not tp.is_complex_type():
- return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
+ cname = tp.name.replace(' ', '_')
+ if cname in ('char16_t', 'char32_t'):
+ self.needs_version(VERSION_CHAR16CHAR32)
+ return '_cffi_from_c_%s(%s)' % (cname, var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py
--- a/lib_pypy/cffi/vengine_cpy.py
+++ b/lib_pypy/cffi/vengine_cpy.py
@@ -808,7 +808,8 @@
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
- c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
+ c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
+ and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
@@ -842,11 +843,13 @@
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
- typedef unsigned char _Bool;
+# ifndef __cplusplus
+ typedef unsigned char _Bool;
+# endif
# endif
#else
# include <stdint.h>
-# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
+# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py
--- a/lib_pypy/cffi/vengine_gen.py
+++ b/lib_pypy/cffi/vengine_gen.py
@@ -627,7 +627,8 @@
#include <sys/types.h> /* XXX for ssize_t on some platforms */
/* this block of #ifs should be kept exactly identical between
- c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
+ c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
+ and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
@@ -661,11 +662,13 @@
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
- typedef unsigned char _Bool;
+# ifndef __cplusplus
+ typedef unsigned char _Bool;
+# endif
# endif
#else
# include <stdint.h>
-# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
+# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py
--- a/lib_pypy/stackless.py
+++ b/lib_pypy/stackless.py
@@ -268,12 +268,22 @@
assert abs(d) == 1
source = getcurrent()
source.tempval = arg
- if d > 0:
- cando = self.balance < 0
- dir = d
- else:
- cando = self.balance > 0
- dir = 0
+ while True:
+ if d > 0:
+ cando = self.balance < 0
+ dir = d
+ else:
+ cando = self.balance > 0
+ dir = 0
+
+ if cando and self.queue[0]._tasklet_killed:
+ # issue #2595: the tasklet was killed while waiting.
+ # drop that tasklet from consideration and try again.
+ self.balance += d
+ self.queue.popleft()
+ else:
+ # normal path
+ break
if _channel_callback is not None:
_channel_callback(self, source, dir, not cando)
@@ -348,6 +358,8 @@
module.
"""
tempval = None
+ _tasklet_killed = False
+
def __new__(cls, func=None, label=''):
res = coroutine.__new__(cls)
res.label = label
@@ -395,6 +407,7 @@
If the exception passes the toplevel frame of the tasklet,
the tasklet will silently die.
"""
+ self._tasklet_killed = True
if not self.is_zombie:
# Killing the tasklet by throwing TaskletExit exception.
coroutine.kill(self)
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -93,7 +93,8 @@
libsqlite3
curses
- libncurses
+ libncurses-dev (for PyPy2)
+ libncursesw-dev (for PyPy3)
gdbm
libgdbm-dev
@@ -106,12 +107,13 @@
To run untranslated tests, you need the Boehm garbage collector libgc.
-On Debian, this is the command to install all build-time dependencies::
+On Debian and Ubuntu, this is the command to install all build-time
+dependencies::
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
tk-dev libgc-dev python-cffi \
- liblzma-dev # For lzma on PyPy3.
+ liblzma-dev libncursesw-dev # these two only needed on PyPy3
On Fedora::
@@ -195,6 +197,29 @@
``/tmp/usession-YOURNAME/build/``. You can then either move the file
hierarchy or unpack the ``.tar.bz2`` at the correct place.
+It is recommended to use package.py because custom scripts will
+invariably become out-of-date. If you want to write custom scripts
+anyway, note an easy-to-miss point: some modules are written with CFFI,
+and require some compilation. If you install PyPy as root without
+pre-compiling them, normal users will get errors:
+
+* PyPy 2.5.1 or earlier: normal users would see permission errors.
+ Installers need to run ``pypy -c "import gdbm"`` and other similar
+ commands at install time; the exact list is in `package.py`_. Users
+ seeing a broken installation of PyPy can fix it after-the-fact if they
+ have sudo rights, by running once e.g. ``sudo pypy -c "import gdbm"``.
+
+* PyPy 2.6 and later: anyone would get ``ImportError: no module named
+ _gdbm_cffi``. Installers need to run ``pypy _gdbm_build.py`` in the
+ ``lib_pypy`` directory during the installation process (plus others;
+ see the exact list in `package.py`_). Users seeing a broken
+ installation of PyPy can fix it after-the-fact, by running ``pypy
+ /path/to/lib_pypy/_gdbm_build.py``. This command produces a file
+ called ``_gdbm_cffi.pypy-41.so`` locally, which is a C extension
+ module for PyPy. You can move it at any place where modules are
+ normally found: e.g. in your project's main directory, or in a
+ directory that you add to the env var ``PYTHONPATH``.
+
Installation
------------
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -59,16 +59,16 @@
# General information about the project.
project = u'PyPy'
-copyright = u'2016, The PyPy Project'
+copyright = u'2017, The PyPy Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = '5.4'
+version = '5.8'
# The full version, including alpha/beta/rc tags.
-release = '5.4.0'
+release = '5.8.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -6,11 +6,11 @@
Armin Rigo
Maciej Fijalkowski
- Carl Friedrich Bolz
+ Carl Friedrich Bolz-Tereick
Amaury Forgeot d'Arc
Antonio Cuni
+ Matti Picus
Samuele Pedroni
- Matti Picus
Ronan Lamy
Alex Gaynor
Philip Jenvey
@@ -68,28 +68,28 @@
Vincent Legoll
Michael Foord
Stephan Diehl
+ Stefano Rivera
Stefan Schwarzer
Tomek Meka
Valentino Volonghi
- Stefano Rivera
Patrick Maupin
Devin Jeanpierre
Bob Ippolito
Bruno Gola
David Malcolm
Jean-Paul Calderone
+ Squeaky
Edd Barrett
- Squeaky
Timo Paulssen
Marius Gedminas
Alexandre Fayolle
Simon Burton
Nicolas Truessel
Martin Matusiak
+ Laurence Tratt
Wenzhu Man
Konstantin Lopuhin
John Witulski
- Laurence Tratt
Greg Price
Ivan Sichmann Freitas
Dario Bertini
@@ -116,13 +116,13 @@
Stian Andreassen
Wanja Saatkamp
Mike Blume
+ Joannah Nanjekye
Gerald Klix
Oscar Nierstrasz
Rami Chowdhury
Stefan H. Muller
- Joannah Nanjekye
+ Tim Felgentreff
Eugene Oden
- Tim Felgentreff
Jeff Terrace
Henry Mason
Vasily Kuznetsov
@@ -131,11 +131,11 @@
Dusty Phillips
Lukas Renggli
Guenter Jantzen
+ Jasper Schulz
Ned Batchelder
Amit Regmi
Anton Gulenko
Sergey Matyunin
- Jasper Schulz
Andrew Chambers
Nicolas Chauvat
Andrew Durdin
@@ -150,6 +150,7 @@
Gintautas Miliauskas
Lucian Branescu Mihaila
anatoly techtonik
+ Dodan Mihai
Karl Bartel
Gabriel Lavoie
Jared Grubb
@@ -187,12 +188,14 @@
Vaibhav Sood
Reuben Cummings
Attila Gobi
+ Alecsandru Patrascu
Christopher Pope
Tristan Arthur
Christian Tismer
Dan Stromberg
Carl Meyer
Florin Papa
+ Jens-Uwe Mager
Valentina Mukhamedzhanova
Stefano Parmesan
touilleMan
@@ -231,7 +234,6 @@
Dan Buch
Lene Wagner
Tomo Cocoa
- Alecsandru Patrascu
David Lievens
Neil Blakey-Milner
Henrik Vendelbo
@@ -270,6 +272,7 @@
Anna Katrina Dominguez
Kim Jin Su
Amber Brown
+ Nate Bragg
Ben Darnell
Juan Francisco Cantero Hurtado
Godefroid Chappelle
@@ -307,11 +310,13 @@
Jim Hunziker
shoma hosaka
Buck Golemon
+ Iraklis D.
JohnDoe
yrttyr
Michael Chermside
Anna Ravencroft
remarkablerocket
+ Petre Vijiac
Berker Peksag
Christian Muirhead
soareschen
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -60,7 +60,7 @@
The interface for full finalizers is made with PyPy in mind, but should
be generally useful.
-The idea is that you subclass the ``rgc.FinalizerQueue`` class::
+The idea is that you subclass the ``rgc.FinalizerQueue`` class:
* You must give a class-level attribute ``base_class``, which is the
base class of all instances with a finalizer. (If you need
diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst
--- a/pypy/doc/discussion/rawrefcount.rst
+++ b/pypy/doc/discussion/rawrefcount.rst
@@ -68,10 +68,12 @@
and O = list of links created with rawrefcount.create_link_pyobj().
The PyPy objects in the list O are all W_CPyExtPlaceHolderObject: all
the data is in the PyObjects, and all outsite references (if any) are
-in C, as "PyObject *" fields.
+in C, as ``PyObject *`` fields.
So, during the collection we do this about P links:
+.. code-block:: python
+
for (p, ob) in P:
if ob->ob_refcnt != REFCNT_FROM_PYPY
and ob->ob_refcnt != REFCNT_FROM_PYPY_LIGHT:
@@ -80,6 +82,8 @@
At the end of the collection, the P and O links are both handled like
this:
+.. code-block:: python
+
for (p, ob) in P + O:
if p is not surviving: # even if 'ob' might be surviving
unlink p and ob
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -5,8 +5,8 @@
++++++++++++++
We try to create a stable release a few times a year. These are released on
-a branch named like release-2.x or release-4.x, and each release is tagged,
-for instance release-4.0.1.
+a branch named like release-pypy3.5-v2.x or release-pypy3.5-v4.x, and each
+release is tagged, for instance release-pypy3.5-v4.0.1.
After release, inevitably there are bug fixes. It is the responsibility of
the committer who fixes a bug to make sure this fix is on the release branch,
@@ -33,7 +33,7 @@
* If needed, make a release branch
* Bump the
pypy version number in module/sys/version.py and in
- module/cpyext/include/patchlevel.h and . The branch
+ module/cpyext/include/patchlevel.h and in doc/conf.py. The branch
will capture the revision number of this change for the release.
Some of the next updates may be done before or after branching; make
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -30,12 +30,22 @@
whatsnew-2.0.0-beta1.rst
whatsnew-1.9.rst
+CPython 3.5 compatible versions
+-------------------------------
+
+.. toctree::
+
+ whatsnew-pypy3-head.rst
+ whatsnew-pypy3-5.8.0.rst
+ whatsnew-pypy3-5.7.0.rst
+
CPython 3.3 compatible versions
-------------------------------
.. toctree::
whatsnew-pypy3-5.5.0.rst
+ whatsnew-pypy3-5.1.1-alpha1.rst
CPython 3.2 compatible versions
-------------------------------
diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst
--- a/pypy/doc/install.rst
+++ b/pypy/doc/install.rst
@@ -12,6 +12,7 @@
and using pip.
.. _prebuilt-pypy:
+
Download a pre-built PyPy
~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst
--- a/pypy/doc/objspace.rst
+++ b/pypy/doc/objspace.rst
@@ -250,12 +250,12 @@
.. py:function:: newunicode(ustr)
Creates a Unicode string from an rpython unicode string.
- This method may disappear soon and be replaced by :py:function:`newutf8()`.
- This method may disappear soon and be replaced by :py:function:`newutf8`.
.. py:function:: newutf8(bytestr)
Creates a Unicode string from an rpython byte string, decoded as
- "utf-8-nosg". On PyPy3 it is the same as :py:function:`newtext()`.
+ "utf-8-nosg". On PyPy3 it is the same as :py:function:`newtext`.
Many more space operations can be found in `pypy/interpreter/baseobjspace.py` and
`pypy/objspace/std/objspace.py`.
@@ -302,9 +302,9 @@
.. py:function:: unicode_w(w_x)
- Takes an application level :py:class:`unicode` and return an
+ Takes an application level :py:class:`unicode` and returns an
interpreter-level unicode string. This method may disappear soon and
- be replaced by :py:function:`text_w()`.
+ be replaced by :py:function:`text_w`.
.. py:function:: float_w(w_x)
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -238,18 +238,17 @@
using more pypy-friendly technologies, e.g. cffi. Here is a partial list of
good work that needs to be finished:
-**matplotlib** https://github.com/mattip/matplotlib
+**matplotlib** https://github.com/matplotlib/matplotlib
- Status: the repo is an older version of matplotlib adapted to pypy and cpyext
+ TODO: the tkagg backend does not work, which makes tests fail on downstream
+ projects like Pandas, SciPy. It uses id(obj) as a c-pointer to obj in
+ tkagg.py, which requires refactoring
- TODO: A suggested first step would be to merge the differences into
More information about the pypy-commit
mailing list