[pypy-commit] pypy reverse-debugger: hg merge default

arigo pypy.commits@gmail.com
Wed Aug 24 13:06:40 EDT 2016


Author: Armin Rigo <arigo@tunes.org>
Branch: reverse-debugger
Changeset: r86505:670d925b3853
Date: 2016-08-24 19:06 +0200
http://bitbucket.org/pypy/pypy/changeset/670d925b3853/

Log:	hg merge default

diff too long, truncating to 2000 out of 15666 lines

diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -85,10 +85,11 @@
     pass
 
 def splitline(line, re_word = re.compile(r'[^\s"]\S*|["]["]|["].*?[^\\]["]')):
+    import ast
     result = []
     for word in re_word.findall(line):
         if word.startswith('"'):
-            word = eval(word)
+            word = ast.literal_eval(word)
         result.append(word)
     return result
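
(For reference, a minimal sketch that is not part of this changeset: ast.literal_eval()
unescapes quoted words just like eval() did, but rejects anything that is not a plain
Python literal.)

    import ast

    print(ast.literal_eval('"a \\"quoted\\" word"'))       # unescaped, like eval() did
    try:
        ast.literal_eval('__import__("os").system("boom")')
    except (ValueError, SyntaxError):
        print("rejected: not a plain literal")             # eval() would have executed it
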
 
diff --git a/include/PyPy.h b/include/PyPy.h
--- a/include/PyPy.h
+++ b/include/PyPy.h
@@ -2,7 +2,11 @@
 #define _PYPY_H_
 
 /* This header is meant to be included in programs that use PyPy as an
-   embedded library. */
+   embedded library.
+
+   NOTE: this is deprecated.  Instead, use cffi's embedding support:
+   http://cffi.readthedocs.org/en/latest/embedding.html
+*/
 
 #ifdef __cplusplus
 extern "C" {
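
(For readers following the deprecation note above: a minimal, hypothetical sketch of the
cffi embedding route it points to, assuming cffi >= 1.5; the module name _embedding_demo
and the exported function are invented for illustration.)

    import cffi

    ffi = cffi.FFI()
    ffi.embedding_api("int add(int, int);")   # C symbols exported by the built library
    ffi.set_source("_embedding_demo", "/* no extra C code needed for this sketch */")
    ffi.embedding_init_code("""
        from _embedding_demo import ffi

        @ffi.def_extern()
        def add(x, y):
            return x + y
    """)
    ffi.compile()   # produces a shared library that embeds the interpreter
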
diff --git a/lib-python/2.7/test/test_hash.py b/lib-python/2.7/test/test_hash.py
--- a/lib-python/2.7/test/test_hash.py
+++ b/lib-python/2.7/test/test_hash.py
@@ -174,7 +174,7 @@
 
 class StringlikeHashRandomizationTests(HashRandomizationTests):
     if check_impl_detail(pypy=True):
-        EMPTY_STRING_HASH = -1
+        EMPTY_STRING_HASH = -2
     else:
         EMPTY_STRING_HASH = 0
 
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -167,7 +167,7 @@
         else:
             return self.value
 
-    def __buffer__(self):
+    def __buffer__(self, flags):
         return buffer(self._buffer)
 
     def _get_b_base(self):
@@ -199,10 +199,13 @@
     return tp._alignmentofinstances()
 
 @builtinify
-def byref(cdata):
+def byref(cdata, offset=0):
     # "pointer" is imported at the end of this module to avoid circular
     # imports
-    return pointer(cdata)
+    ptr = pointer(cdata)
+    if offset != 0:
+        ptr._buffer[0] += offset
+    return ptr
 
 def cdata_from_address(self, address):
     # fix the address: turn it into as unsigned, in case it's a negative number
diff --git a/lib_pypy/_pypy_winbase_build.py b/lib_pypy/_pypy_winbase_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_build.py
@@ -0,0 +1,91 @@
+# Note: uses the CFFI out-of-line ABI mode.  We can't use the API
+# mode because ffi.compile() needs to run the compiler, which
+# needs 'subprocess', which needs 'msvcrt' and '_subprocess',
+# which depend on '_pypy_winbase_cffi' already.
+#
+# Note that if you need to regenerate _pypy_winbase_cffi and
+# can't use a preexisting PyPy to do that, then running this
+# file should work as long as 'subprocess' is not imported
+# by cffi.  I had to hack in 'cffi._pycparser' to move an
+# 'import subprocess' to the inside of a function.  (Also,
+# CPython+CFFI should work as well.)
+#
+# This module supports both msvcrt.py and _subprocess.py.
+
+from cffi import FFI
+
+ffi = FFI()
+
+ffi.set_source("_pypy_winbase_cffi", None)
+
+# ---------- MSVCRT ----------
+
+ffi.cdef("""
+typedef unsigned short wint_t;
+
+int _open_osfhandle(intptr_t osfhandle, int flags);
+intptr_t _get_osfhandle(int fd);
+int _setmode(int fd, int mode);
+int _locking(int fd, int mode, long nbytes);
+
+int _kbhit(void);
+int _getch(void);
+wint_t _getwch(void);
+int _getche(void);
+wint_t _getwche(void);
+int _putch(int);
+wint_t _putwch(wchar_t);
+int _ungetch(int);
+wint_t _ungetwch(wint_t);
+""")
+
+# ---------- SUBPROCESS ----------
+
+ffi.cdef("""
+typedef struct {
+    DWORD  cb;
+    char * lpReserved;
+    char * lpDesktop;
+    char * lpTitle;
+    DWORD  dwX;
+    DWORD  dwY;
+    DWORD  dwXSize;
+    DWORD  dwYSize;
+    DWORD  dwXCountChars;
+    DWORD  dwYCountChars;
+    DWORD  dwFillAttribute;
+    DWORD  dwFlags;
+    WORD   wShowWindow;
+    WORD   cbReserved2;
+    LPBYTE lpReserved2;
+    HANDLE hStdInput;
+    HANDLE hStdOutput;
+    HANDLE hStdError;
+} STARTUPINFO, *LPSTARTUPINFO;
+
+typedef struct {
+    HANDLE hProcess;
+    HANDLE hThread;
+    DWORD  dwProcessId;
+    DWORD  dwThreadId;
+} PROCESS_INFORMATION, *LPPROCESS_INFORMATION;
+
+DWORD WINAPI GetVersion(void);
+BOOL WINAPI CreatePipe(PHANDLE, PHANDLE, void *, DWORD);
+BOOL WINAPI CloseHandle(HANDLE);
+HANDLE WINAPI GetCurrentProcess(void);
+BOOL WINAPI DuplicateHandle(HANDLE, HANDLE, HANDLE, LPHANDLE,
+                            DWORD, BOOL, DWORD);
+BOOL WINAPI CreateProcessA(char *, char *, void *,
+                           void *, BOOL, DWORD, char *,
+                           char *, LPSTARTUPINFO, LPPROCESS_INFORMATION);
+DWORD WINAPI WaitForSingleObject(HANDLE, DWORD);
+BOOL WINAPI GetExitCodeProcess(HANDLE, LPDWORD);
+BOOL WINAPI TerminateProcess(HANDLE, UINT);
+HANDLE WINAPI GetStdHandle(DWORD);
+""")
+
+# --------------------
+
+if __name__ == "__main__":
+    ffi.compile()
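
(Roughly how the generated module is then consumed at runtime in ABI mode, so no C
compiler is needed; Windows-only, illustrative, and mirroring the cdef() above.)

    from _pypy_winbase_cffi import ffi

    kernel32 = ffi.dlopen('kernel32')
    print(hex(kernel32.GetVersion()))

    handles = ffi.new("HANDLE[2]")                    # [read, write]
    if not kernel32.CreatePipe(handles, handles + 1, ffi.NULL, 0):
        code, message = ffi.getwinerror()
        raise WindowsError(code, message)
    kernel32.CloseHandle(handles[0])
    kernel32.CloseHandle(handles[1])
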
diff --git a/lib_pypy/_pypy_winbase_cffi.py b/lib_pypy/_pypy_winbase_cffi.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_cffi.py
@@ -0,0 +1,10 @@
+# auto-generated file
+import _cffi_backend
+
+ffi = _cffi_backend.FFI('_pypy_winbase_cffi',
+    _version = 0x2601,
+    _types = b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x50\x03\x00\x00\x13\x11\x00\x00\x53\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x4F\x03\x00\x00\x4E\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x42\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x52\x03\x00\x00\x04\x01\x00\x00\x00\x01',
+    _globals = (b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x4C\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x49\x23GetStdHandle',0,b'\x00\x00\x3F\x23GetVersion',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x3B\x23WaitForSingleObject',0,b'\x00\x00\x38\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x44\x23_getwch',0,b'\x00\x00\x44\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x46\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x41\x23_ungetwch',0),
+    _struct_unions = ((b'\x00\x00\x00\x4E\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x4F\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x42\x11wShowWindow',b'\x00\x00\x42\x11cbReserved2',b'\x00\x00\x51\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')),
+    _typenames = (b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x4EPROCESS_INFORMATION',b'\x00\x00\x00\x4FSTARTUPINFO',b'\x00\x00\x00\x42wint_t'),
+)
diff --git a/lib_pypy/_subprocess.py b/lib_pypy/_subprocess.py
--- a/lib_pypy/_subprocess.py
+++ b/lib_pypy/_subprocess.py
@@ -10,148 +10,99 @@
 
 # Declare external Win32 functions
 
-import ctypes
-
-_kernel32 = ctypes.WinDLL('kernel32')
-
-_CloseHandle = _kernel32.CloseHandle
-_CloseHandle.argtypes = [ctypes.c_int]
-_CloseHandle.restype = ctypes.c_int
-
-_CreatePipe = _kernel32.CreatePipe
-_CreatePipe.argtypes = [ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
-                        ctypes.c_void_p, ctypes.c_int]
-_CreatePipe.restype = ctypes.c_int
-
-_GetCurrentProcess = _kernel32.GetCurrentProcess
-_GetCurrentProcess.argtypes = []
-_GetCurrentProcess.restype = ctypes.c_int
+from _pypy_winbase_cffi import ffi as _ffi
+_kernel32 = _ffi.dlopen('kernel32')
 
 GetVersion = _kernel32.GetVersion
-GetVersion.argtypes = []
-GetVersion.restype = ctypes.c_int
 
-_DuplicateHandle = _kernel32.DuplicateHandle
-_DuplicateHandle.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,
-                             ctypes.POINTER(ctypes.c_int),
-                             ctypes.c_int, ctypes.c_int, ctypes.c_int]
-_DuplicateHandle.restype = ctypes.c_int
-
-_WaitForSingleObject = _kernel32.WaitForSingleObject
-_WaitForSingleObject.argtypes = [ctypes.c_int, ctypes.c_uint]
-_WaitForSingleObject.restype = ctypes.c_int
-
-_GetExitCodeProcess = _kernel32.GetExitCodeProcess
-_GetExitCodeProcess.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
-_GetExitCodeProcess.restype = ctypes.c_int
-
-_TerminateProcess = _kernel32.TerminateProcess
-_TerminateProcess.argtypes = [ctypes.c_int, ctypes.c_int]
-_TerminateProcess.restype = ctypes.c_int
-
-_GetStdHandle = _kernel32.GetStdHandle
-_GetStdHandle.argtypes = [ctypes.c_int]
-_GetStdHandle.restype = ctypes.c_int
-
-class _STARTUPINFO(ctypes.Structure):
-    _fields_ = [('cb',         ctypes.c_int),
-                ('lpReserved', ctypes.c_void_p),
-                ('lpDesktop',  ctypes.c_char_p),
-                ('lpTitle',    ctypes.c_char_p),
-                ('dwX',        ctypes.c_int),
-                ('dwY',        ctypes.c_int),
-                ('dwXSize',    ctypes.c_int),
-                ('dwYSize',    ctypes.c_int),
-                ('dwXCountChars', ctypes.c_int),
-                ('dwYCountChars', ctypes.c_int),
-                ("dwFillAttribute", ctypes.c_int),
-                ("dwFlags", ctypes.c_int),
-                ("wShowWindow", ctypes.c_short),
-                ("cbReserved2", ctypes.c_short),
-                ("lpReserved2", ctypes.c_void_p),
-                ("hStdInput", ctypes.c_int),
-                ("hStdOutput", ctypes.c_int),
-                ("hStdError", ctypes.c_int)
-                ]
-
-class _PROCESS_INFORMATION(ctypes.Structure):
-    _fields_ = [("hProcess", ctypes.c_int),
-                ("hThread", ctypes.c_int),
-                ("dwProcessID", ctypes.c_int),
-                ("dwThreadID", ctypes.c_int)]
-
-_CreateProcess = _kernel32.CreateProcessA
-_CreateProcess.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p,
-                           ctypes.c_int, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p,
-                           ctypes.POINTER(_STARTUPINFO), ctypes.POINTER(_PROCESS_INFORMATION)]
-_CreateProcess.restype = ctypes.c_int
-
-del ctypes
 
 # Now the _subprocess module implementation
 
-from ctypes import c_int as _c_int, byref as _byref, WinError as _WinError
+def _WinError():
+    code, message = _ffi.getwinerror()
+    raise WindowsError(code, message)
 
-class _handle:
-    def __init__(self, handle):
-        self.handle = handle
+_INVALID_HANDLE_VALUE = _ffi.cast("HANDLE", -1)
+
+class _handle(object):
+    def __init__(self, c_handle):
+        # 'c_handle' is a cffi cdata of type HANDLE, which is basically 'void *'
+        self.c_handle = c_handle
+        if int(self) != -1:
+            self.c_handle = _ffi.gc(self.c_handle, _kernel32.CloseHandle)
 
     def __int__(self):
-        return self.handle
+        return int(_ffi.cast("intptr_t", self.c_handle))
 
-    def __del__(self):
-        if self.handle is not None:
-            _CloseHandle(self.handle)
+    def __repr__(self):
+        return '<_subprocess.handle %d at 0x%x>' % (int(self), id(self))
 
     def Detach(self):
-        handle, self.handle = self.handle, None
-        return handle
+        h = int(self)
+        if h != -1:
+            c_handle = self.c_handle
+            self.c_handle = _INVALID_HANDLE_VALUE
+            _ffi.gc(c_handle, None)
+        return h
 
     def Close(self):
-        if self.handle not in (-1, None):
-            _CloseHandle(self.handle)
-            self.handle = None
+        if int(self) != -1:
+            c_handle = self.c_handle
+            self.c_handle = _INVALID_HANDLE_VALUE
+            _ffi.gc(c_handle, None)
+            _kernel32.CloseHandle(c_handle)
 
 def CreatePipe(attributes, size):
-    read = _c_int()
-    write = _c_int()
+    handles = _ffi.new("HANDLE[2]")
 
-    res = _CreatePipe(_byref(read), _byref(write), None, size)
+    res = _kernel32.CreatePipe(handles, handles + 1, _ffi.NULL, size)
 
     if not res:
         raise _WinError()
 
-    return _handle(read.value), _handle(write.value)
+    return _handle(handles[0]), _handle(handles[1])
 
 def GetCurrentProcess():
-    return _handle(_GetCurrentProcess())
+    return _handle(_kernel32.GetCurrentProcess())
 
 def DuplicateHandle(source_process, source, target_process, access, inherit, options=0):
-    target = _c_int()
+    # CPython: the first three arguments are expected to be integers
+    target = _ffi.new("HANDLE[1]")
 
-    res = _DuplicateHandle(int(source_process), int(source), int(target_process),
-                           _byref(target),
-                           access, inherit, options)
+    res = _kernel32.DuplicateHandle(
+        _ffi.cast("HANDLE", source_process),
+        _ffi.cast("HANDLE", source),
+        _ffi.cast("HANDLE", target_process),
+        target, access, inherit, options)
 
     if not res:
         raise _WinError()
 
-    return _handle(target.value)
+    return _handle(target[0])
+
+def _z(input):
+    if input is None:
+        return _ffi.NULL
+    if isinstance(input, basestring):
+        return str(input)
+    raise TypeError("string/unicode/None expected, got %r" % (
+        type(input).__name__,))
 
 def CreateProcess(name, command_line, process_attr, thread_attr,
                   inherit, flags, env, start_dir, startup_info):
-    si = _STARTUPINFO()
+    si = _ffi.new("STARTUPINFO *")
     if startup_info is not None:
         si.dwFlags = startup_info.dwFlags
         si.wShowWindow = startup_info.wShowWindow
+        # CPython: these three handles are expected to be _handle objects
         if startup_info.hStdInput:
-            si.hStdInput = int(startup_info.hStdInput)
+            si.hStdInput = startup_info.hStdInput.c_handle
         if startup_info.hStdOutput:
-            si.hStdOutput = int(startup_info.hStdOutput)
+            si.hStdOutput = startup_info.hStdOutput.c_handle
         if startup_info.hStdError:
-            si.hStdError = int(startup_info.hStdError)
+            si.hStdError = startup_info.hStdError.c_handle
 
-    pi = _PROCESS_INFORMATION()
+    pi = _ffi.new("PROCESS_INFORMATION *")
 
     if env is not None:
         envbuf = ""
@@ -159,47 +110,55 @@
             envbuf += "%s=%s\0" % (k, v)
         envbuf += '\0'
     else:
-        envbuf = None
+        envbuf = _ffi.NULL
 
-    res = _CreateProcess(name, command_line, None, None, inherit, flags, envbuf,
-                        start_dir, _byref(si), _byref(pi))
+    res = _kernel32.CreateProcessA(_z(name), _z(command_line), _ffi.NULL,
+                                   _ffi.NULL, inherit, flags, envbuf,
+                                   _z(start_dir), si, pi)
 
     if not res:
         raise _WinError()
 
-    return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessID, pi.dwThreadID
+    return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessId, pi.dwThreadId
 
 def WaitForSingleObject(handle, milliseconds):
-    res = _WaitForSingleObject(int(handle), milliseconds)
-
+    # CPython: the first argument is expected to be an integer.
+    res = _kernel32.WaitForSingleObject(_ffi.cast("HANDLE", handle),
+                                        milliseconds)
     if res < 0:
         raise _WinError()
 
     return res
 
 def GetExitCodeProcess(handle):
-    code = _c_int()
+    # CPython: the first argument is expected to be an integer.
+    code = _ffi.new("DWORD[1]")
 
-    res = _GetExitCodeProcess(int(handle), _byref(code))
+    res = _kernel32.GetExitCodeProcess(_ffi.cast("HANDLE", handle), code)
 
     if not res:
         raise _WinError()
 
-    return code.value
+    return code[0]
 
 def TerminateProcess(handle, exitcode):
-    res = _TerminateProcess(int(handle), exitcode)
+    # CPython: the first argument is expected to be an integer.
+    # The second argument is silently wrapped in a UINT.
+    res = _kernel32.TerminateProcess(_ffi.cast("HANDLE", handle),
+                                     _ffi.cast("UINT", exitcode))
 
     if not res:
         raise _WinError()
 
 def GetStdHandle(stdhandle):
-    res = _GetStdHandle(stdhandle)
+    stdhandle = _ffi.cast("DWORD", stdhandle)
+    res = _kernel32.GetStdHandle(stdhandle)
 
     if not res:
         return None
     else:
-        return res
+        # note: returns integer, not handle object
+        return int(_ffi.cast("intptr_t", res))
 
 STD_INPUT_HANDLE = -10
 STD_OUTPUT_HANDLE = -11
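
(The handle-ownership pattern used by the new _handle class, as an illustrative
Windows-only sketch: ffi.gc() attaches CloseHandle as a destructor, and ffi.gc(cdata,
None) detaches it again, which is what Detach() and Close() rely on to avoid a double
close.)

    from _pypy_winbase_cffi import ffi
    kernel32 = ffi.dlopen('kernel32')

    handles = ffi.new("HANDLE[2]")
    kernel32.CreatePipe(handles, handles + 1, ffi.NULL, 0)   # error checking omitted

    owned = ffi.gc(handles[0], kernel32.CloseHandle)  # closed automatically when collected
    ffi.gc(owned, None)                               # like Detach(): drop the destructor
    kernel32.CloseHandle(owned)                       # now close it by hand, exactly once
    kernel32.CloseHandle(handles[1])
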
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: cffi
-Version: 1.7.0
+Version: 1.8.0
 Summary: Foreign Function Interface for Python calling C code.
 Home-page: http://cffi.readthedocs.org
 Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
 from .api import FFI, CDefError, FFIError
 from .ffiplatform import VerificationError, VerificationMissing
 
-__version__ = "1.7.0"
-__version_info__ = (1, 7, 0)
+__version__ = "1.8.0"
+__version_info__ = (1, 8, 0)
 
 # The verifier module file names are based on the CRC32 of a string that
 # contains the following version number.  It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -42,7 +42,9 @@
 #  include <stdint.h>
 # endif
 # if _MSC_VER < 1800   /* MSVC < 2013 */
-   typedef unsigned char _Bool;
+#  ifndef __cplusplus
+    typedef unsigned char _Bool;
+#  endif
 # endif
 #else
 # include <stdint.h>
@@ -59,7 +61,7 @@
 
 #ifdef __cplusplus
 # ifndef _Bool
-#  define _Bool bool   /* semi-hackish: C++ has no _Bool; bool is builtin */
+   typedef bool _Bool;   /* semi-hackish: C++ has no _Bool; bool is builtin */
 # endif
 #endif
 
@@ -196,20 +198,6 @@
     return NULL;
 }
 
-_CFFI_UNUSED_FN
-static PyObject **_cffi_unpack_args(PyObject *args_tuple, Py_ssize_t expected,
-                                    const char *fnname)
-{
-    if (PyTuple_GET_SIZE(args_tuple) != expected) {
-        PyErr_Format(PyExc_TypeError,
-                     "%.150s() takes exactly %zd arguments (%zd given)",
-                     fnname, expected, PyTuple_GET_SIZE(args_tuple));
-        return NULL;
-    }
-    return &PyTuple_GET_ITEM(args_tuple, 0);   /* pointer to the first item,
-                                                  the others follow */
-}
-
 /**********  end CPython-specific section  **********/
 #else
 _CFFI_UNUSED_FN
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
         f = PySys_GetObject((char *)"stderr");
         if (f != NULL && f != Py_None) {
             PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
-                               "\ncompiled with cffi version: 1.7.0"
+                               "\ncompiled with cffi version: 1.8.0"
                                "\n_cffi_backend module: ", f);
             modules = PyImport_GetModuleDict();
             mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py
--- a/lib_pypy/cffi/_pycparser/__init__.py
+++ b/lib_pypy/cffi/_pycparser/__init__.py
@@ -10,7 +10,6 @@
 __all__ = ['c_lexer', 'c_parser', 'c_ast']
 __version__ = '2.14'
 
-from subprocess import Popen, PIPE
 from .c_parser import CParser
 
 
@@ -28,6 +27,7 @@
         When successful, returns the preprocessed file's contents.
         Errors from cpp will be printed out.
     """
+    from subprocess import Popen, PIPE
     path_list = [cpp_path]
     if isinstance(cpp_args, list):
         path_list += cpp_args
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -519,12 +519,10 @@
             smallest_value = min(self.enumvalues)
             largest_value = max(self.enumvalues)
         else:
-            import warnings
-            warnings.warn("%r has no values explicitly defined; next version "
-                          "will refuse to guess which integer type it is "
-                          "meant to be (unsigned/signed, int/long)"
-                          % self._get_c_name())
-            smallest_value = largest_value = 0
+            raise api.CDefError("%r has no values explicitly defined: "
+                                "refusing to guess which integer type it is "
+                                "meant to be (unsigned/signed, int/long)"
+                % self._get_c_name())
         if smallest_value < 0:   # needs a signed type
             sign = 1
             candidate1 = PrimitiveType("int")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -275,6 +275,8 @@
     def write_c_source_to_f(self, f, preamble):
         self._f = f
         prnt = self._prnt
+        if self.ffi._embedding is None:
+            prnt('#define Py_LIMITED_API')
         #
         # first the '#include' (actually done by inlining the file's content)
         lines = self._rel_readlines('_cffi_include.h')
@@ -513,7 +515,7 @@
                                                     tovar, errcode)
             return
         #
-        elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
+        elif isinstance(tp, model.StructOrUnionOrEnum):
             # a struct (not a struct pointer) as a function argument
             self._prnt('  if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
                       % (tovar, self._gettypenum(tp), fromvar))
@@ -570,7 +572,7 @@
         elif isinstance(tp, model.ArrayType):
             return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                 var, self._gettypenum(model.PointerType(tp.item)))
-        elif isinstance(tp, model.StructType):
+        elif isinstance(tp, model.StructOrUnion):
             if tp.fldnames is None:
                 raise TypeError("'%s' is used as %s, but is opaque" % (
                     tp._get_c_name(), context))
@@ -683,13 +685,11 @@
             rng = range(len(tp.args))
             for i in rng:
                 prnt('  PyObject *arg%d;' % i)
-            prnt('  PyObject **aa;')
             prnt()
-            prnt('  aa = _cffi_unpack_args(args, %d, "%s");' % (len(rng), name))
-            prnt('  if (aa == NULL)')
+            prnt('  if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % (
+                name, len(rng), len(rng),
+                ', '.join(['&arg%d' % i for i in rng])))
             prnt('    return NULL;')
-            for i in rng:
-                prnt('  arg%d = aa[%d];' % (i, i))
         prnt()
         #
         for i, type in enumerate(tp.args):
@@ -862,6 +862,8 @@
             enumfields = list(tp.enumfields())
             for fldname, fldtype, fbitsize, fqual in enumfields:
                 fldtype = self._field_type(tp, fldname, fldtype)
+                self._check_not_opaque(fldtype,
+                                       "field '%s.%s'" % (tp.name, fldname))
                 # cname is None for _add_missing_struct_unions() only
                 op = OP_NOOP
                 if fbitsize >= 0:
@@ -911,6 +913,13 @@
                             first_field_index, c_fields))
         self._seen_struct_unions.add(tp)
 
+    def _check_not_opaque(self, tp, location):
+        while isinstance(tp, model.ArrayType):
+            tp = tp.item
+        if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None:
+            raise TypeError(
+                "%s is of an opaque type (not declared in cdef())" % location)
+
     def _add_missing_struct_unions(self):
         # not very nice, but some struct declarations might be missing
         # because they don't have any known C name.  Check that they are
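
(A hypothetical cdef() of the kind the new _check_not_opaque() guard is aimed at; the
struct names are invented for illustration.)

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("""
        typedef struct opaque_s opaque_t;    /* opaque: layout never declared */
        struct outer_s { opaque_t inner; };  /* field of an opaque type       */
    """)
    # With set_source()/compile(), generating the module now stops early with
    # something like:
    #   TypeError: field 'outer_s.inner' is of an opaque type (not declared in cdef())
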
diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py
--- a/lib_pypy/cffi/vengine_cpy.py
+++ b/lib_pypy/cffi/vengine_cpy.py
@@ -308,7 +308,7 @@
         elif isinstance(tp, model.ArrayType):
             return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                 var, self._gettypenum(model.PointerType(tp.item)))
-        elif isinstance(tp, model.StructType):
+        elif isinstance(tp, model.StructOrUnion):
             if tp.fldnames is None:
                 raise TypeError("'%s' is used as %s, but is opaque" % (
                     tp._get_c_name(), context))
diff --git a/lib_pypy/msvcrt.py b/lib_pypy/msvcrt.py
--- a/lib_pypy/msvcrt.py
+++ b/lib_pypy/msvcrt.py
@@ -7,26 +7,39 @@
 # XXX incomplete: implemented only functions needed by subprocess.py
 # PAC: 2010/08 added MS locking for Whoosh
 
-import ctypes
+# 07/2016: rewrote in CFFI
+
+import sys
+if sys.platform != 'win32':
+    raise ImportError("The 'msvcrt' module is only available on Windows")
+
+import _rawffi
+from _pypy_winbase_cffi import ffi as _ffi
+_lib = _ffi.dlopen(_rawffi.get_libc().name)
+
 import errno
-from ctypes_support import standard_c_lib as _c
-from ctypes_support import get_errno
-
-try:
-    open_osfhandle = _c._open_osfhandle
-except AttributeError: # we are not on windows
-    raise ImportError
 
 try: from __pypy__ import builtinify, validate_fd
 except ImportError: builtinify = validate_fd = lambda f: f
 
 
-open_osfhandle.argtypes = [ctypes.c_int, ctypes.c_int]
-open_osfhandle.restype = ctypes.c_int
+def _ioerr():
+    e = _ffi.errno
+    raise IOError(e, errno.errorcode[e])
 
-_get_osfhandle = _c._get_osfhandle
-_get_osfhandle.argtypes = [ctypes.c_int]
-_get_osfhandle.restype = ctypes.c_int
+
+@builtinify
+def open_osfhandle(fd, flags):
+    """open_osfhandle(handle, flags) -> file descriptor
+
+    Create a C runtime file descriptor from the file handle handle. The
+    flags parameter should be a bitwise OR of os.O_APPEND, os.O_RDONLY,
+    and os.O_TEXT. The returned file descriptor may be used as a parameter
+    to os.fdopen() to create a file object."""
+    fd = _lib._open_osfhandle(fd, flags)
+    if fd == -1:
+        _ioerr()
+    return fd
 
 @builtinify
 def get_osfhandle(fd):
@@ -38,62 +51,74 @@
         validate_fd(fd)
     except OSError as e:
         raise IOError(*e.args)
-    return _get_osfhandle(fd)
+    result = _lib._get_osfhandle(fd)
+    if result == -1:
+        _ioerr()
+    return result
 
-setmode = _c._setmode
-setmode.argtypes = [ctypes.c_int, ctypes.c_int]
-setmode.restype = ctypes.c_int
+@builtinify
+def setmode(fd, flags):
+    """setmode(fd, mode) -> Previous mode
+
+    Set the line-end translation mode for the file descriptor fd. To set
+    it to text mode, flags should be os.O_TEXT; for binary, it should be
+    os.O_BINARY."""
+    flags = _lib._setmode(fd, flags)
+    if flags == -1:
+        _ioerr()
+    return flags
 
 LK_UNLCK, LK_LOCK, LK_NBLCK, LK_RLCK, LK_NBRLCK = range(5)
 
-_locking = _c._locking
-_locking.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
-_locking.restype = ctypes.c_int
-
 @builtinify
 def locking(fd, mode, nbytes):
-    '''lock or unlock a number of bytes in a file.'''
-    rv = _locking(fd, mode, nbytes)
+    """locking(fd, mode, nbytes) -> None
+
+    Lock part of a file based on file descriptor fd from the C runtime.
+    Raises IOError on failure. The locked region of the file extends from
+    the current file position for nbytes bytes, and may continue beyond
+    the end of the file. mode must be one of the LK_* constants listed
+    below. Multiple regions in a file may be locked at the same time, but
+    may not overlap. Adjacent regions are not merged; they must be unlocked
+    individually."""
+    rv = _lib._locking(fd, mode, nbytes)
     if rv != 0:
-        e = get_errno()
-        raise IOError(e, errno.errorcode[e])
+        _ioerr()
 
 # Console I/O routines
 
-kbhit = _c._kbhit
-kbhit.argtypes = []
-kbhit.restype = ctypes.c_int
+kbhit = _lib._kbhit
 
-getch = _c._getch
-getch.argtypes = []
-getch.restype = ctypes.c_char
+@builtinify
+def getch():
+    return chr(_lib._getch())
 
-getwch = _c._getwch
-getwch.argtypes = []
-getwch.restype = ctypes.c_wchar
+@builtinify
+def getwch():
+    return unichr(_lib._getwch())
 
-getche = _c._getche
-getche.argtypes = []
-getche.restype = ctypes.c_char
+@builtinify
+def getche():
+    return chr(_lib._getche())
 
-getwche = _c._getwche
-getwche.argtypes = []
-getwche.restype = ctypes.c_wchar
+@builtinify
+def getwche():
+    return unichr(_lib._getwche())
 
-putch = _c._putch
-putch.argtypes = [ctypes.c_char]
-putch.restype = None
+@builtinify
+def putch(ch):
+    _lib._putch(ord(ch))
 
-putwch = _c._putwch
-putwch.argtypes = [ctypes.c_wchar]
-putwch.restype = None
+@builtinify
+def putwch(ch):
+    _lib._putwch(ord(ch))
 
-ungetch = _c._ungetch
-ungetch.argtypes = [ctypes.c_char]
-ungetch.restype = None
+@builtinify
+def ungetch(ch):
+    if _lib._ungetch(ord(ch)) == -1:   # EOF
+        _ioerr()
 
-ungetwch = _c._ungetwch
-ungetwch.argtypes = [ctypes.c_wchar]
-ungetwch.restype = None
-
-del ctypes
+@builtinify
+def ungetwch(ch):
+    if _lib._ungetwch(ord(ch)) == -1:   # EOF
+        _ioerr()
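
(A small, Windows-only usage sketch of the rewritten module; the file name is made up
and error handling is omitted.)

    import os
    import msvcrt

    fd = os.open("demo.txt", os.O_RDWR | os.O_CREAT)
    print(msvcrt.get_osfhandle(fd))           # the underlying OS handle, as an int
    msvcrt.setmode(fd, os.O_BINARY)           # switch the fd to binary mode
    msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)    # lock the first byte, non-blocking
    msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)    # ...and unlock it again
    os.close(fd)
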
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -86,7 +86,11 @@
     if len(limits) != 2:
         raise ValueError("expected a tuple of 2 integers")
 
-    if lib.my_setrlimit(resource, limits[0], limits[1]) == -1:
+    # accept and round down floats, like CPython does
+    limit0 = int(limits[0])
+    limit1 = int(limits[1])
+
+    if lib.my_setrlimit(resource, limit0, limit1) == -1:
         if ffi.errno == EINVAL:
             raise ValueError("current limit exceeds maximum limit")
         elif ffi.errno == EPERM:
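
(Illustrative one-liner for the change above: float limits are now accepted and
truncated, as on CPython.)

    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (float(soft), float(hard)))  # rounded down
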
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -36,7 +36,7 @@
     "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array",
     "binascii", "_multiprocessing", '_warnings', "_collections",
     "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend",
-    "_csv", "cppyy", "_pypyjson",
+    "_csv", "cppyy", "_pypyjson", "_jitlog"
 ])
 
 from rpython.jit.backend import detect_cpu
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -104,27 +104,24 @@
 
     apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
     libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
-    tk-dev libgc-dev liblzma-dev
-
-For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
+    tk-dev libgc-dev \
+    liblzma-dev  # For lzma on PyPy3.
 
 On Fedora::
 
     dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
     lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
-    gdbm-devel
-
-For the optional lzma module on PyPy3 you will also need ``xz-devel``.
+    gdbm-devel \
+    xz-devel  # For lzma on PyPy3.
 
 On SLES11::
 
     zypper install gcc make python-devel pkg-config \
     zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \
-    libexpat-devel libffi-devel python-curses
+    libexpat-devel libffi-devel python-curses \
+    xz-devel  # For lzma on PyPy3.
     (XXX plus the SLES11 version of libgdbm-dev and tk-dev)
 
-For the optional lzma module on PyPy3 you will also need ``xz-devel``.
-
 On Mac OS X, most of these build-time dependencies are installed alongside
 the Developer Tools. However, note that in order for the installation to
 find them you may need to run::
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -99,17 +99,24 @@
 
 The garbage collectors used or implemented by PyPy are not based on
 reference counting, so the objects are not freed instantly when they are no
-longer reachable.  The most obvious effect of this is that files are not
+longer reachable.  The most obvious effect of this is that files (and sockets, etc) are not
 promptly closed when they go out of scope.  For files that are opened for
 writing, data can be left sitting in their output buffers for a while, making
 the on-disk file appear empty or truncated.  Moreover, you might reach your
 OS's limit on the number of concurrently opened files.
 
-Fixing this is essentially impossible without forcing a
+If you are debugging a case where a file in your program is not closed
+properly, you can use the ``-X track-resources`` command line option. If it is
+given, a ``ResourceWarning`` is produced for every file and socket that the
+garbage collector closes. The warning will contain the stack trace of the
+position where the file or socket was created, to make it easier to see which
+parts of the program don't close files explicitly.
+
+Fixing this difference from CPython is essentially impossible without forcing a
 reference-counting approach to garbage collection.  The effect that you
 get in CPython has clearly been described as a side-effect of the
 implementation and not a language design decision: programs relying on
-this are basically bogus.  It would anyway be insane to try to enforce
+this are basically bogus.  It would be too strong a restriction to try to enforce
 CPython's behavior in a language spec, given that it has no chance to be
 adopted by Jython or IronPython (or any other port of Python to Java or
 .NET).
@@ -134,7 +141,7 @@
 
 Here are some more technical details.  This issue affects the precise
 time at which ``__del__`` methods are called, which
-is not reliable in PyPy (nor Jython nor IronPython).  It also means that
+is not reliable or timely in PyPy (nor Jython nor IronPython).  It also means that
 **weak references** may stay alive for a bit longer than expected.  This
 makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
 useful: they will appear to stay alive for a bit longer in PyPy, and
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -335,3 +335,65 @@
 
 This will disable SELinux's protection and allow PyPy to configure correctly.
 Be sure to enable it again if you need it!
+
+
+How should I report a bug?
+--------------------------
+
+Our bug tracker is here: https://bitbucket.org/pypy/pypy/issues/
+
+Missing features or incompatibilities with CPython are considered
+bugs, and reports of them are welcome.  (See also our list of `known
+incompatibilities`__.)
+
+.. __: http://pypy.org/compat.html
+
+For bugs of the kind "I'm getting a PyPy crash or a strange
+exception", please note that: **We can't do anything without
+reproducing the bug ourselves**.  We cannot do anything with
+tracebacks from gdb, or core dumps.  This is not only because the
+standard PyPy is compiled without debug symbols.  The real reason is
+that a C-level traceback is usually of no help at all in PyPy.
+Debugging PyPy can be annoying.
+
+`This is a clear and useful bug report.`__  (Admittedly, sometimes
+the problem is really hard to reproduce, but please try to.)
+
+.. __: https://bitbucket.org/pypy/pypy/issues/2363/segfault-in-gc-pinned-object-in
+
+In more detail:
+
+* First, please give the exact PyPy version, and the OS.
+
+* It might help focus our search if we know if the bug can be
+  reproduced with "``pypy --jit off``" or not.  If "``pypy --jit
+  off``" always works, then the problem might be in the JIT.
+  Otherwise, we know we can ignore that part.
+
+* If you got the bug using only Open Source components, please give a
+  step-by-step guide that we can follow to reproduce the problem
+  ourselves.  Don't assume we know anything about any program other
+  than PyPy.  We would like a guide that we can follow point by point
+  (without guessing or having to figure things out)
+  on a machine similar to yours, starting from a bare PyPy, until we
+  see the same problem.  (If you can, you can try to reduce the number
+  of steps and the time it needs to run, but that is not mandatory.)
+
+* If the bug involves Closed Source components, or just too many Open
+  Source components to install them all ourselves, then maybe you can
+  give us some temporary ssh access to a machine where the bug can be
+  reproduced.  Or, maybe we can download a VirtualBox or VMWare
+  virtual machine where the problem occurs.
+
+* If giving us access would require us to use tools other than ssh,
+  make appointments, or sign an NDA, then we can consider a commercial
+  support contract for a small sum of money.
+
+* If even that is not possible for you, then sorry, we can't help.
+
+Of course, you can try to debug the problem yourself, and we can help
+you get started if you ask on the #pypy IRC channel, but be prepared:
+debugging an annoying PyPy problem usually involves quite a lot of gdb
+in auto-generated C code, and at least some knowledge about the
+various components involved, from PyPy's own RPython source code to
+the GC and possibly the JIT.
diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -14,10 +14,9 @@
     Defaults to 1/2 of your cache or ``4M``.
     Small values (like 1 or 1KB) are useful for debugging.
 
-``PYPY_GC_NURSERY_CLEANUP``
-    The interval at which nursery is cleaned up. Must
-    be smaller than the nursery size and bigger than the
-    biggest object we can allotate in the nursery.
+``PYPY_GC_NURSERY_DEBUG``
+    If set to non-zero, will fill nursery with garbage, to help
+    debugging.
 
 ``PYPY_GC_INCREMENT_STEP``
     The size of memory marked during the marking step.  Default is size of
@@ -62,3 +61,8 @@
     use.
     Values are ``0`` (off), ``1`` (on major collections) or ``2`` (also
     on minor collections).
+
+``PYPY_GC_MAX_PINNED``
+    The maximal number of pinned objects at any point in time.  Defaults
+    to a conservative value depending on nursery size and maximum object
+    size inside the nursery.  Useful for debugging by setting it to 0.
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
 
 .. toctree::
 
+   release-pypy2.7-v5.4.0.rst
    release-pypy2.7-v5.3.1.rst
    release-pypy2.7-v5.3.0.rst
    release-5.1.1.rst
diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst
--- a/pypy/doc/install.rst
+++ b/pypy/doc/install.rst
@@ -39,17 +39,16 @@
 library.
 
 If you want to install 3rd party libraries, the most convenient way is
-to install pip_ (unless you want to install virtualenv as explained
-below; then you can directly use pip inside virtualenvs):
+to install pip_ using ensurepip_ (unless you want to install virtualenv as 
+explained below; then you can directly use pip inside virtualenvs):
 
 .. code-block:: console
 
-    $ curl -O https://bootstrap.pypa.io/get-pip.py
-    $ ./pypy-2.1/bin/pypy get-pip.py
-    $ ./pypy-2.1/bin/pip install pygments  # for example
+    $ ./pypy-xxx/bin/pypy -m ensurepip
+    $ ./pypy-xxx/bin/pip install pygments  # for example
 
-Third party libraries will be installed in ``pypy-2.1/site-packages``, and
-the scripts in ``pypy-2.1/bin``.
+Third party libraries will be installed in ``pypy-xxx/site-packages``, and
+the scripts in ``pypy-xxx/bin``.
 
 
 Installing using virtualenv
@@ -61,7 +60,7 @@
 checkout::
 
 	# from a tarball
-	$ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env
+	$ virtualenv -p /opt/pypy-xxx/bin/pypy my-pypy-env
 
 	# from the mercurial checkout
 	$ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env
@@ -69,7 +68,7 @@
 Note that bin/python is now a symlink to bin/pypy.
 
 .. _pip: http://pypi.python.org/pypi/pip
-
+.. _ensurepip: https://docs.python.org/2.7/library/ensurepip.html
 
 Building PyPy yourself
 ~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -2,6 +2,9 @@
  pypy
 ======
 
+.. note: this is turned into a regular man page "pypy.1" by
+   doing "make man" in pypy/doc/
+
 SYNOPSIS
 ========
 
@@ -48,6 +51,10 @@
 -B
     Disable writing bytecode (``.pyc``) files.
 
+-X track-resources
+    Produce a ``ResourceWarning`` whenever a file or socket is closed by the
+    garbage collector.
+
 --version
     Print the PyPy version.
 
diff --git a/pypy/doc/release-pypy2.7-v5.4.0.rst b/pypy/doc/release-pypy2.7-v5.4.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-pypy2.7-v5.4.0.rst
@@ -0,0 +1,142 @@
+============
+PyPy2.7 v5.4
+============
+
+We have released PyPy2.7 v5.4, a little under two months after PyPy2.7 v5.3.
+This new PyPy2.7 release includes further improvements to our C-API compatibility layer (cpyext), enabling us to pass over 99% of the upstream
+numpy `test suite`_. We updated built-in cffi_ support to version 1.8,
+and fixed many issues and bugs raised by the growing community of PyPy
+users.
+
+XXXXX MORE ???
+
+You can download the PyPy2.7 v5.4 release here:
+
+    http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project.
+
+We would also like to thank our contributors and
+encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_
+with making RPython's JIT even better.
+
+.. _`test suite`: https://bitbucket.org/pypy/pypy/wiki/Adventures%20in%20cpyext%20compatibility
+.. _cffi: https://cffi.readthedocs.org
+.. _`PyPy`: http://doc.pypy.org
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other `dynamic languages`_ to see what RPython
+can do for them.
+
+This release supports: 
+
+  * **x86** machines on most common operating systems
+    (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD)
+  
+  * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+  
+  * big- and little-endian variants of **PPC64** running Linux,
+
+  * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Other Highlights (since 5.3 released in June 2016)
+=========================================================
+
+* New features:
+
+  * Add `sys.{get,set}dlopenflags`
+
+  * Improve CPython compatibility of 'is' for small and empty strings
+
+  * Support for rgc.FinalizerQueue in the Boehm garbage collector
+
+  * (RPython) support spawnv() if it is called in C `_spawnv` on windows
+
+  * Fill in more slots when creating a PyTypeObject from a W_TypeObject,
+    like `__hex__`, `__sub__`, `__pow__`
+
+  * Copy CPython's logic more closely for `isinstance()` and
+    `issubclass()` as well as `type.__instancecheck__()` and
+    `type.__subclasscheck__()`
+
+* Bug Fixes
+
+  * Reject `mkdir()` in read-only sandbox filesystems
+
+  * Add include guards to pymem.h to enable c++ compilation
+
+  * Fix OpenBSD build breakage and support OpenBSD in VMProf.
+
+  * Fix for `bytearray('').replace('a', 'ab')` for empty strings
+
+  * Sync internal state before calling `PyFile_AsFile()`
+
+  * Allow writing to a char* from `PyString_AsString()` until it is
+    forced, also refactor `PyStringObject` to look like CPython's
+    and allow subclassing `PyString_Type` and `PyUnicode_Type`
+
+  * Rpython rffi's socket(2) wrapper did not preserve errno
+
+  * Refactor `PyTupleObject` to look like CPython's and allow
+    subclassing `PyTuple_Type`
+
+  * Allow c-level assignment to a function pointer in a C-API
+    user-defined type after calling PyTypeReady by retrieving
+    a pointer to the function via offsets
+    rather than storing the function pointer itself
+
+  * Use `madvise(MADV_FREE)`, or if that doesn't exist
+    `MADV_DONTNEED` on freed arenas to release memory back to the
+    OS for resource monitoring
+
+  * Issues reported with our previous release were resolved_ after
+    reports from users on our issue tracker at
+    https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy
+
+* Performance improvements:
+
+  * Add a before_call()-like equivalent before a few operations like
+    `malloc_nursery`, to move values from registers into other registers
+    instead of to the stack.
+
+  * More tightly pack the stack when calling with `release gil`
+
+  * Support `int_floordiv()`, `int_mod()` in the JIT more efficiently
+    and add `rarithmetic.int_c_div()`, `rarithmetic.int_c_mod()` as
+    explicit interfaces. Clarify that `int_floordiv()` does python-style
+    rounding, unlike `llop.int_floordiv()`.
+
+  * Use `ll_assert` (more often) in incminimark
+
+  * (Testing) Simplify handling of interp-level tests and make it
+    more forward-compatible. Don't use interp-level RPython
+    machinery to test building app-level extensions in cpyext
+
+  * Constant-fold `ffi.offsetof("structname", "fieldname")` in cffi
+    backend
+
+  * Avoid a case in the JIT, where successive guard failures in
+    the same Python function end up as successive levels of
+    RPython functions, eventually exhausting the stack, while at
+    app-level the traceback is very short
+
+.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.3.0.html
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -53,3 +53,109 @@
 
 Refactor PyTupleObject to look like cpython's and allow subclassing 
 PyTuple_Type
+
+.. branch: call-via-pyobj
+
+Use offsets from PyTypeObject to find actual c function to call rather than
+fixed functions, allows function override after PyType_Ready is called
+
+.. branch: issue2335
+
+Avoid exhausting the stack in the JIT due to successive guard
+failures in the same Python function ending up as successive levels of
+RPython functions, while at app-level the traceback is very short
+
+.. branch: use-madv-free
+
+Try harder to return memory to the OS.  See e.g. issue #2336.  Note that it does
+not show up as a reduction of the VIRT column in ``top``, and the RES
+column might also not show the reduction, particularly on Linux >= 4.5 or
+on OS/X: it uses MADV_FREE, which only marks the pages as returnable to
+the OS if the memory is low.
+
+.. branch: cpyext-slotdefs2
+
+Fill in more slots when creating a PyTypeObject from a W_TypeObject
+More slots are still TBD, like tp_print and richcmp
+
+.. branch: json-surrogates
+
+Align json module decoding with CPython's implementation; fixes issue 2345
+
+.. branch: issue2343
+
+Copy CPython's logic more closely for handling of ``__instancecheck__()``
+and ``__subclasscheck__()``.  Fixes issue 2343.
+
+.. branch: msvcrt-cffi
+
+Rewrite the Win32 dependencies of 'subprocess' to use cffi instead
+of ctypes. This avoids importing ctypes in many small programs and
+scripts, which in turn avoids enabling threads (because ctypes
+creates callbacks at import time, and callbacks need threads).
+
+.. branch: new-jit-log
+
+The new logging facility that integrates with and adds features to vmprof.com.
+
+.. branch: jitlog-32bit
+
+Resolve issues to use the new logging facility on a 32bit system
+
+.. branch: ep2016sprint
+
+Trying harder to make hash(-1) return -2, like it does on CPython
+
+.. branch: jitlog-exact-source-lines
+
+Log exact line positions in debug merge points.
+
+.. branch: null_byte_after_str
+
+Allocate all RPython strings with one extra byte, normally unused.
+It is used to hold a final zero in case we need some ``char *``
+representation of the string, together with checks like ``not
+can_move()`` or object pinning. Main new thing that this allows:
+``ffi.from_buffer(string)`` in CFFI.  Additionally, and most
+importantly, CFFI calls that take directly a string as argument don't
+copy the string any more---this is like CFFI on CPython.
+
+.. branch: resource_warning
+
+Add a new command line option -X track-resources which will produce
+ResourceWarnings when the GC closes unclosed files and sockets.
+
+.. branch: cpyext-realloc
+
+Implement PyObject_Realloc
+
+.. branch: inline-blocks
+
+Improve a little bit the readability of the generated C code
+
+.. branch: improve-vmprof-testing
+
+Improved vmprof support: now tries hard to not miss any Python-level
+frame in the captured stacks, even if there is the metainterp or
+blackhole interp involved.  Also fix the stacklet (greenlet) support.
+
+.. branch: py2-mappingproxy
+
+``type.__dict__`` now returns a ``dict_proxy`` object, like on CPython.
+Previously it returned what looked like a regular dict object (but it
+was already read-only).
+
+
+.. branch: const-fold-we-are-jitted
+
+Reduce the size of the generated C code by constant-folding ``we_are_jitted``
+in non-jitcode.
+
+.. branch: memoryview-attributes
+
+Support for memoryview attributes (format, itemsize, ...).
+Extends the cpyext emulation layer.
+
+.. branch: redirect-assembler-jitlog
+
+Log more information to properly rebuild the redirected traces in jitviewer.
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -24,11 +24,15 @@
 -V     : print the Python version number and exit (also --version)
 -W arg : warning control; arg is action:message:category:module:lineno
          also PYTHONWARNINGS=arg
+-X arg : set implementation-specific option
 file   : program read from script file
 -      : program read from stdin (default; interactive mode if a tty)
 arg ...: arguments passed to program in sys.argv[1:]
+
 PyPy options and arguments:
 --info : print translation information about this PyPy executable
+-X track-resources : track the creation of files and sockets and display
+                     a warning if they are not closed explicitly
 """
 # Missing vs CPython: PYTHONHOME, PYTHONCASEOK
 USAGE2 = """
@@ -235,6 +239,14 @@
         import pypyjit
         pypyjit.set_param(jitparam)
 
+def set_runtime_options(options, Xparam, *args):
+    if Xparam == 'track-resources':
+        sys.pypy_set_track_resources(True)
+    else:
+        print >> sys.stderr, 'usage: %s -X [options]' % (get_sys_executable(),)
+        print >> sys.stderr, '[options] can be: track-resources'
+        raise SystemExit
+
 class CommandLineError(Exception):
     pass
 
@@ -410,6 +422,7 @@
     '--info':    (print_info,      None),
     '--jit':     (set_jit_option,  Ellipsis),
     '-funroll-loops': (funroll_loops, None),
+    '-X':        (set_runtime_options, Ellipsis),
     '--':        (end_options,     None),
     }
 
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -387,7 +387,8 @@
     def _stacksize(self, blocks):
         """Compute co_stacksize."""
         for block in blocks:
-            block.initial_depth = 0
+            block.initial_depth = -99
+        blocks[0].initial_depth = 0
         # Assumes that it is sufficient to walk the blocks in 'post-order'.
         # This means we ignore all back-edges, but apart from that, we only
         # look into a block when all the previous blocks have been done.
@@ -406,8 +407,11 @@
 
     def _do_stack_depth_walk(self, block):
         depth = block.initial_depth
+        if depth == -99:     # this block is never reached, skip
+             return 0
         for instr in block.instructions:
             depth += _opcode_stack_effect(instr.opcode, instr.arg)
+            assert depth >= 0
             if depth >= self._max_depth:
                 self._max_depth = depth
             jump_op = instr.opcode
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -108,8 +108,15 @@
         return getattr(space, name)(operand)
     return do_fold
 
-def _fold_pow(space, left, right):
-    return space.pow(left, right, space.w_None)
+def _fold_pow(space, w_left, w_right):
+    # don't constant-fold if "w_left" and "w_right" are integers and
+    # the estimated bit length of the power is unreasonably large
+    space.appexec([w_left, w_right], """(left, right):
+        if isinstance(left, (int, long)) and isinstance(right, (int, long)):
+            if left.bit_length() * right > 5000:
+                raise OverflowError
+    """)
+    return space.pow(w_left, w_right, space.w_None)
 
 def _fold_not(space, operand):
     return space.wrap(not space.is_true(operand))
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1166,3 +1166,22 @@
             counts = self.count_instructions(source)
             assert ops.BUILD_SET not in counts
             assert ops.LOAD_CONST in counts
+
+    def test_dont_fold_huge_powers(self):
+        for source in (
+            "2 ** 3000",         # not constant-folded: too big
+            "(-2) ** 3000",
+            ):
+            source = 'def f(): %s' % source
+            counts = self.count_instructions(source)
+            assert ops.BINARY_POWER in counts
+
+        for source in (
+            "2 ** 2000",         # constant-folded
+            "2 ** -3000",
+            "1.001 ** 3000",
+            "1 ** 3000.0",
+            ):
+            source = 'def f(): %s' % source
+            counts = self.count_instructions(source)
+            assert ops.BINARY_POWER not in counts
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -208,7 +208,8 @@
     def buffer_w(self, space, flags):
         w_impl = space.lookup(self, '__buffer__')
         if w_impl is not None:
-            w_result = space.get_and_call_function(w_impl, self)
+            w_result = space.get_and_call_function(w_impl, self, 
+                                        space.newint(flags))
             if space.isinstance_w(w_result, space.w_buffer):
                 return w_result.buffer_w(space, flags)
         raise BufferInterfaceNotFound
@@ -216,7 +217,8 @@
     def readbuf_w(self, space):
         w_impl = space.lookup(self, '__buffer__')
         if w_impl is not None:
-            w_result = space.get_and_call_function(w_impl, self)
+            w_result = space.get_and_call_function(w_impl, self,
+                                        space.newint(space.BUF_FULL_RO))
             if space.isinstance_w(w_result, space.w_buffer):
                 return w_result.readbuf_w(space)
         raise BufferInterfaceNotFound
@@ -224,7 +226,8 @@
     def writebuf_w(self, space):
         w_impl = space.lookup(self, '__buffer__')
         if w_impl is not None:
-            w_result = space.get_and_call_function(w_impl, self)
+            w_result = space.get_and_call_function(w_impl, self,
+                                        space.newint(space.BUF_FULL))
             if space.isinstance_w(w_result, space.w_buffer):
                 return w_result.writebuf_w(space)
         raise BufferInterfaceNotFound
@@ -232,7 +235,8 @@
     def charbuf_w(self, space):
         w_impl = space.lookup(self, '__buffer__')
         if w_impl is not None:
-            w_result = space.get_and_call_function(w_impl, self)
+            w_result = space.get_and_call_function(w_impl, self,
+                                        space.newint(space.BUF_FULL_RO))
             if space.isinstance_w(w_result, space.w_buffer):
                 return w_result.charbuf_w(space)
         raise BufferInterfaceNotFound
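
The four hunks above change how the object space calls an app-level __buffer__ hook: it now receives an integer flags argument, BUF_FULL for the writable path and BUF_FULL_RO for the read-only ones. A hedged app-level sketch of what such a hook now looks like (the Chunk class is made up, and a simple implementation can just ignore the flags):

    class Chunk(object):
        # Illustrative only: an object exposing its storage through the
        # PyPy-level __buffer__ hook, which now takes a 'flags' argument.
        def __init__(self, data):
            self._data = bytearray(data)

        def __buffer__(self, flags):
            # 'flags' is BUF_FULL when writebuf_w() asks for a writable
            # buffer and BUF_FULL_RO for the read-only paths above.
            return buffer(self._data)
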
@@ -1729,6 +1733,23 @@
                 "Python int too large for C unsigned short")
         return value
 
+    def c_uid_t_w(self, w_obj):
+        # xxx assumes that uid_t and gid_t are each a C unsigned int.
+        # Equivalent to space.c_uint_w(), except that it also accepts
+        # -1 and converts it to UINT_MAX, which is (uid_t)-1, and that
+        # values smaller than -1 raise OverflowError instead of
+        # ValueError.
+        try:
+            return self.c_uint_w(w_obj)
+        except OperationError as e:
+            if e.match(self, self.w_ValueError):
+                # ValueError: cannot convert negative integer to unsigned
+                if self.int_w(w_obj) == -1:
+                    return UINT_MAX
+                raise oefmt(self.w_OverflowError,
+                            "user/group id smaller than minimum (-1)")
+            raise
+
     def truncatedint_w(self, w_obj, allow_conversion=True):
         # Like space.gateway_int_w(), but return the integer truncated
         # instead of raising OverflowError.  For obscure cases only.
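
c_uid_t_w() above is c_uint_w() plus one special case: -1 maps to UINT_MAX, i.e. (uid_t)-1, while anything below -1 raises OverflowError rather than ValueError. A plain-Python sketch of the same mapping (UINT_MAX and the helper name are illustrative, assuming a 32-bit C unsigned int):

    UINT_MAX = 2 ** 32 - 1      # assuming uid_t is a 32-bit C unsigned int

    def to_uid_t(value):
        # Same rules as c_uid_t_w() above.
        if value == -1:
            return UINT_MAX     # (uid_t)-1
        if value < -1:
            raise OverflowError("user/group id smaller than minimum (-1)")
        if value > UINT_MAX:
            raise OverflowError("Python int too large for C unsigned int")
        return value
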
@@ -1790,6 +1811,40 @@
             _warnings.warn(msg, warningcls, stacklevel=stacklevel)
         """)
 
+    def resource_warning(self, w_msg, w_tb):
+        self.appexec([w_msg, w_tb],
+                     """(msg, tb):
+            import sys
+            print >> sys.stderr, msg
+            if tb:
+                print >> sys.stderr, "Created at (most recent call last):"
+                print >> sys.stderr, tb
+        """)
+
+    def format_traceback(self):
+        # We need to disable track_resources before calling the traceback
+        # module: otherwise it may open more files while formatting the
+        # traceback, the file constructor then calls space.format_traceback()
+        # again, and so on, in an infinite recursion.
+        flag = self.sys.track_resources
+        self.sys.track_resources = False
+        try:
+            return self.appexec([],
+                         """():
+                import sys, traceback
+                # the "1" is because we don't want to show THIS code
+                # object in the traceback
+                try:
+                    f = sys._getframe(1)
+                except ValueError:
+                    # this happens if you call format_traceback at the very beginning
+                    # of startup, when there is no bottom code object
+                    return '<no stacktrace available>'
+                return "".join(traceback.format_stack(f))
+            """)
+        finally:
+            self.sys.track_resources = flag
+
 
 class AppExecCache(SpaceCache):
     def build(cache, source):
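
resource_warning() above prints an app-level message plus an optional "Created at" traceback to stderr, and format_traceback() builds that traceback string while temporarily clearing sys.track_resources so that any file opened by the traceback module cannot recurse back into it. A rough plain-Python restatement of that re-entrancy guard, with a stand-in object in place of space.sys:

    import sys, traceback

    class _TrackState(object):
        # stand-in for space.sys; only the attribute the guard needs
        track_resources = True

    def format_traceback(state=_TrackState):
        # Same shape as space.format_traceback() above: turn the flag off
        # while the traceback module runs, so a file opened during
        # formatting cannot call back into format_traceback() forever.
        flag = state.track_resources
        state.track_resources = False
        try:
            try:
                frame = sys._getframe(1)     # skip this helper's own frame
            except ValueError:
                return '<no stacktrace available>'
            return "".join(traceback.format_stack(frame))
        finally:
            state.track_resources = flag
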
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -167,6 +167,9 @@
     def visit_c_ushort(self, el, app_sig):
         self.checked_space_method(el, app_sig)
 
+    def visit_c_uid_t(self, el, app_sig):
+        self.checked_space_method(el, app_sig)
+
     def visit_truncatedint_w(self, el, app_sig):
         self.checked_space_method(el, app_sig)
 
@@ -294,6 +297,9 @@
     def visit_c_ushort(self, typ):
         self.run_args.append("space.c_ushort_w(%s)" % (self.scopenext(),))
 
+    def visit_c_uid_t(self, typ):
+        self.run_args.append("space.c_uid_t_w(%s)" % (self.scopenext(),))
+
     def visit_truncatedint_w(self, typ):
         self.run_args.append("space.truncatedint_w(%s)" % (self.scopenext(),))
 
@@ -440,6 +446,9 @@
     def visit_c_ushort(self, typ):
         self.unwrap.append("space.c_ushort_w(%s)" % (self.nextarg(),))
 
+    def visit_c_uid_t(self, typ):
+        self.unwrap.append("space.c_uid_t_w(%s)" % (self.nextarg(),))
+
     def visit_truncatedint_w(self, typ):
         self.unwrap.append("space.truncatedint_w(%s)" % (self.nextarg(),))
 
@@ -587,7 +596,10 @@
 
         # First extract the signature from the (CPython-level) code object
         from pypy.interpreter import pycode
-        argnames, varargname, kwargname = pycode.cpython_code_signature(func.func_code)
+        sig = pycode.cpython_code_signature(func.func_code)
+        argnames = sig.argnames
+        varargname = sig.varargname
+        kwargname = sig.kwargname
         self._argnames = argnames
 
         if unwrap_spec is None:
@@ -611,7 +623,9 @@
         app_sig = SignatureBuilder(func)
 
         UnwrapSpec_Check(orig_sig).apply_over(unwrap_spec, app_sig)
-        self.sig = argnames, varargname, kwargname = app_sig.signature()
+        self.sig = app_sig.signature()
+        argnames = self.sig.argnames
+        varargname = self.sig.varargname
 
         self.minargs = len(argnames)
         if varargname:
@@ -942,7 +956,7 @@
                 defs_w.append(space.wrap(defaultval))
         if self._code._unwrap_spec:
             UNDEFINED = object()
-            alldefs_w = [UNDEFINED] * len(self._code.sig[0])
+            alldefs_w = [UNDEFINED] * len(self._code.sig.argnames)
             if defs_w:
                 alldefs_w[-len(defs_w):] = defs_w
             code = self._code
@@ -959,7 +973,7 @@
                     assert isinstance(w_default, W_Root)
                     assert argname.startswith('w_')
                     argname = argname[2:]
-                    j = self._code.sig[0].index(argname)
+                    j = self._code.sig.argnames.index(argname)
                     assert alldefs_w[j] in (UNDEFINED, None)
                     alldefs_w[j] = w_default
             first_defined = 0
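
The gateway.py hunks above (together with the signature.py hunk below) stop treating Signature as a 3-tuple: callers now read the .argnames, .varargname and .kwargname attributes instead of unpacking or indexing. A tiny before/after sketch, assuming PyPy's source tree is importable (the argument names are made up):

    from pypy.interpreter.signature import Signature

    sig = Signature(["a", "b", "c"], "args", "kwargs")

    # old, tuple-like style removed by the signature.py hunk below:
    #     argnames, varargname, kwargname = sig
    # new attribute style, as in the gateway.py hunks above:
    argnames = sig.argnames        # ["a", "b", "c"]
    varargname = sig.varargname    # "args"
    kwargname = sig.kwargname      # "kwargs"
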
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -37,7 +37,7 @@
 
 # cpython_code_signature helper
 def cpython_code_signature(code):
-    "([list-of-arg-names], vararg-name-or-None, kwarg-name-or-None)."
+    """Return a Signature instance."""
     argcount = code.co_argcount
     varnames = code.co_varnames
     assert argcount >= 0     # annotator hint
diff --git a/pypy/interpreter/signature.py b/pypy/interpreter/signature.py
--- a/pypy/interpreter/signature.py
+++ b/pypy/interpreter/signature.py
@@ -55,18 +55,3 @@
         if not isinstance(other, Signature):
             return NotImplemented
         return not self == other
-
-
-    # make it look tuply for its use in the annotator
-
-    def __len__(self):
-        return 3
-
-    def __getitem__(self, i):
-        if i == 0:
-            return self.argnames
-        if i == 1:
-            return self.varargname
-        if i == 2:
-            return self.kwargname
-        raise IndexError
\ No newline at end of file
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -220,6 +220,13 @@
         expected = {"no_user_site": True}
         self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass', **expected)
 
+    def test_track_resources(self, monkeypatch):
+        myflag = [False]
+        def pypy_set_track_resources(flag):
+            myflag[0] = flag
+        monkeypatch.setattr(sys, 'pypy_set_track_resources', pypy_set_track_resources, raising=False)
+        self.check(['-X', 'track-resources'], {}, sys_argv=[''], run_stdin=True)
+        assert myflag[0] == True
 
 class TestInteraction:
     """
@@ -1074,4 +1081,3 @@
             # assert it did not crash
         finally:
             sys.path[:] = old_sys_path
-
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -46,13 +46,6 @@
         assert sig.find_argname("c") == 2
         assert sig.find_argname("d") == -1
 
-    def test_tuply(self):
-        sig = Signature(["a", "b", "c"], "d", "e")
-        x, y, z = sig
-        assert x == ["a", "b", "c"]
-        assert y == "d"
-        assert z == "e"
-
 class dummy_wrapped_dict(dict):
     def __nonzero__(self):
         raise NotImplementedError
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -427,3 +427,28 @@
         space.finish()
         # assert that we reach this point without getting interrupted
         # by the OperationError(NameError)
+
+    def test_format_traceback(self):
+        from pypy.tool.pytest.objspace import maketestobjspace
+        from pypy.interpreter.gateway import interp2app
+        #
+        def format_traceback(space):
+            return space.format_traceback()
+        #
+        space = maketestobjspace()
+        w_format_traceback = space.wrap(interp2app(format_traceback))
+        w_tb = space.appexec([w_format_traceback], """(format_traceback):
+            def foo():
+                return bar()
+            def bar():
+                return format_traceback()
+            return foo()
+        """)
+        tb = space.str_w(w_tb)
+        expected = '\n'.join([
+            '  File "?", line 6, in anonymous',  # this is the appexec code object
+            '  File "?", line 3, in foo',
+            '  File "?", line 5, in bar',
+            ''
+        ])
+        assert tb == expected
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -203,7 +203,8 @@
     name = func.__name__
     extra = ', '.join(extraargs)
     from pypy.interpreter import pycode
-    argnames, _, _ = pycode.cpython_code_signature(func.func_code)
+    sig = pycode.cpython_code_signature(func.func_code)
+    argnames = sig.argnames
     if use_closure:
         if argnames[1] == 'space':
             args = "closure, space, obj"
diff --git a/pypy/module/__builtin__/abstractinst.py b/pypy/module/__builtin__/abstractinst.py
--- a/pypy/module/__builtin__/abstractinst.py
+++ b/pypy/module/__builtin__/abstractinst.py
@@ -46,9 +46,65 @@
             raise       # propagate other errors
         return space.type(w_obj)
 
+
+# ---------- isinstance ----------
+
+
+def p_recursive_isinstance_w(space, w_inst, w_cls):
+    # Copied straight from CPython 2.7.  Does not handle 'cls' being a tuple.
+    if (isinstance(w_cls, W_ClassObject) and
+        isinstance(w_inst, W_InstanceObject)):
+        return w_inst.w_class.is_subclass_of(w_cls)
+
+    if space.isinstance_w(w_cls, space.w_type):
+        return p_recursive_isinstance_type_w(space, w_inst, w_cls)
+
+    check_class(space, w_cls, "isinstance() arg 2 must be a class, type,"
+                              " or tuple of classes and types")
+    try:
+        w_abstractclass = space.getattr(w_inst, space.wrap('__class__'))
+    except OperationError as e:
+        if e.async(space):      # ignore most exceptions
+            raise
+        return False
+    else:
+        return p_abstract_issubclass_w(space, w_abstractclass, w_cls)
+
+
+def p_recursive_isinstance_type_w(space, w_inst, w_type):
+    # subfunctionality of p_recursive_isinstance_w(): assumes that w_type is
+    # a type object.  Copied straight from CPython 2.7.
+    if space.isinstance_w(w_inst, w_type):
+        return True
+    try:
+        w_abstractclass = space.getattr(w_inst, space.wrap('__class__'))
+    except OperationError as e:
+        if e.async(space):      # ignore most exceptions
+            raise
+    else:
+        if w_abstractclass is not space.type(w_inst):
+            if space.isinstance_w(w_abstractclass, space.w_type):
+                return space.issubtype_w(w_abstractclass, w_type)
+    return False
+
+
 @jit.unroll_safe
 def abstract_isinstance_w(space, w_obj, w_klass_or_tuple, allow_override=False):
     """Implementation for the full 'isinstance(obj, klass_or_tuple)'."""
+    # Copied from CPython 2.7's PyObject_IsInstance().  Additionally,
+    # if 'allow_override' is False (the default), then don't try to
+    # use a custom __instancecheck__ method.
+
+    # WARNING: backward compatibility function name here.  CPython
+    # uses the name "abstract" to refer to the logic of handling
+    # class-like objects, with a "__bases__" attribute.  This function
+    # here is not related to that and implements the full
+    # PyObject_IsInstance() logic.
+
+    # Quick test for an exact match
+    if space.type(w_obj) is w_klass_or_tuple:
+        return True
+
     # -- case (anything, tuple)
     # XXX it might be risky that the JIT sees this
     if space.isinstance_w(w_klass_or_tuple, space.w_tuple):
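
The new p_recursive_isinstance_w() / p_recursive_isinstance_type_w() pair above mirrors CPython 2.7's recursive_isinstance(): an exact type match wins immediately, real type objects go through the normal subtype check, and anything else falls back to comparing the instance's __class__ attribute against the class-like object via its __bases__. A rough plain-Python picture of that dispatch, with old-style classes and the __instancecheck__ override path left out (names are illustrative):

    def isinstance_like(obj, cls):
        if type(obj) is cls:                   # quick exact-type test
            return True
        if isinstance(cls, type):              # real type object
            return issubclass(type(obj), cls)
        # class-like object: fall back to obj.__class__ and its __bases__
        try:
            abstract_class = obj.__class__
        except AttributeError:
            return False
        def walk(derived):
            if derived is cls:
                return True
            return any(walk(base)
                       for base in getattr(derived, '__bases__', ()))
        return walk(abstract_class)
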
@@ -58,68 +114,59 @@
         return False
 
     # -- case (anything, type)
-    try:
-        if allow_override:
-            w_result = space.isinstance_allow_override(w_obj, w_klass_or_tuple)
-        else:
-            w_result = space.isinstance(w_obj, w_klass_or_tuple)
-    except OperationError as e:   # if w_klass_or_tuple was not a type, ignore it
-        if not e.match(space, space.w_TypeError):
-            raise       # propagate other errors
-    else:
-        if space.is_true(w_result):
-            return True
-        # From now on we know that w_klass_or_tuple is indeed a type.
-        # Try also to compare it with obj.__class__, if this is not
-        # the same as type(obj).
-        try:
-            w_pretendtype = space.getattr(w_obj, space.wrap('__class__'))
-            if space.is_w(w_pretendtype, space.type(w_obj)):
-                return False     # common case: obj.__class__ is type(obj)
-            if not allow_override:
-                return space.issubtype_w(w_pretendtype, w_klass_or_tuple)
-            w_result = space.issubtype_allow_override(w_pretendtype,
-                                                      w_klass_or_tuple)
-        except OperationError as e:
-            if e.async(space):
-                raise
-            return False      # ignore most exceptions
-        else:
-            return space.is_true(w_result)
+    if allow_override:
+        w_check = space.lookup(w_klass_or_tuple, "__instancecheck__")
+        if w_check is not None:
+            # this is the common case: all type objects have a method
+            # __instancecheck__.  The one in the base 'type' type calls
+            # back p_recursive_isinstance_type_w() from the present module.
+            return space.is_true(space.get_and_call_function(
+                w_check, w_klass_or_tuple, w_obj))
 
-    # -- case (old-style instance, old-style class)
-    if isinstance(w_klass_or_tuple, W_ClassObject):
-        if isinstance(w_obj, W_InstanceObject):
-            return w_obj.w_class.is_subclass_of(w_klass_or_tuple)
-    return _abstract_isinstance_w_helper(space, w_obj, w_klass_or_tuple)
+    return p_recursive_isinstance_w(space, w_obj, w_klass_or_tuple)
 
 
-def _abstract_isinstance_w_helper(space, w_obj, w_klass_or_tuple):
-    # -- case (anything, abstract-class)
-    check_class(space, w_klass_or_tuple,
-                "isinstance() arg 2 must be a class, type,"
-                " or tuple of classes and types")
-    try:
-        w_abstractclass = space.getattr(w_obj, space.wrap('__class__'))
-    except OperationError as e:
-        if e.async(space):      # ignore most exceptions
-            raise
-        return False
-    else:
-        return _issubclass_recurse(space, w_abstractclass, w_klass_or_tuple)
+# ---------- issubclass ----------
 
 
 @jit.unroll_safe
-def _issubclass_recurse(space, w_derived, w_top):
-    """Internal helper for abstract cases.  Here, w_top cannot be a tuple."""
-    if space.is_w(w_derived, w_top):
-        return True
-    w_bases = _get_bases(space, w_derived)
-    if w_bases is not None:
-        for w_base in space.fixedview(w_bases):
-            if _issubclass_recurse(space, w_base, w_top):
+def p_abstract_issubclass_w(space, w_derived, w_cls):
+    # Copied straight from CPython 2.7, function abstract_issubclass().
+    # Don't confuse this with the function abstract_issubclass_w() below.
+    # Here, w_cls cannot be a tuple.
+    while True:
+        if space.is_w(w_derived, w_cls):
+            return True
+        w_bases = _get_bases(space, w_derived)
+        if w_bases is None:
+            return False
+        bases_w = space.fixedview(w_bases)
+        last_index = len(bases_w) - 1
+        if last_index < 0:
+            return False
+        # Avoid recursion in the single-inheritance case; in general,
+        # don't recurse on the last item in the tuple (loop instead).
+        for i in range(last_index):
+            if p_abstract_issubclass_w(space, bases_w[i], w_cls):
                 return True
-    return False
+        w_derived = bases_w[last_index]
+
+
+def p_recursive_issubclass_w(space, w_derived, w_cls):
+    # From CPython's function of the same name (which as far as I can tell
+    # is not recursive).  Copied straight from CPython 2.7.
+    if (space.isinstance_w(w_cls, space.w_type) and
+        space.isinstance_w(w_derived, space.w_type)):
+        return space.issubtype_w(w_derived, w_cls)
+    #
+    if (isinstance(w_derived, W_ClassObject) and
+        isinstance(w_cls, W_ClassObject)):
+        return w_derived.is_subclass_of(w_cls)
+    #
+    check_class(space, w_derived, "issubclass() arg 1 must be a class")
+    check_class(space, w_cls, "issubclass() arg 2 must be a class"
+                              " or tuple of classes")
+    return p_abstract_issubclass_w(space, w_derived, w_cls)
 
 
 @jit.unroll_safe
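
p_abstract_issubclass_w() above avoids unbounded recursion in the common single-inheritance case: it recurses on every base except the last one and loops on the last, so a linear chain of __bases__ of any length costs only constant Python-level stack. A compact restatement of that shape (illustrative name, plain Python):

    def issubclass_by_bases(derived, cls):
        # Recurse on every base but the last; loop on the last one.
        while True:
            if derived is cls:
                return True
            bases = getattr(derived, '__bases__', None)
            if not bases:
                return False
            for base in bases[:-1]:
                if issubclass_by_bases(base, cls):
                    return True
            derived = bases[-1]
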
@@ -127,37 +174,31 @@
                           allow_override=False):
     """Implementation for the full 'issubclass(derived, klass_or_tuple)'."""
 
-    # -- case (class-like-object, tuple-of-classes)
+    # WARNING: backward compatibility function name here.  CPython
+    # uses the name "abstract" to refer to the logic of handling
+    # class-like objects, with a "__bases__" attribute.  This function
+    # here is not related to that and implements the full
+    # PyObject_IsSubclass() logic.  There is also p_abstract_issubclass_w().
+
+    # -- case (anything, tuple-of-classes)
     if space.isinstance_w(w_klass_or_tuple, space.w_tuple):
         for w_klass in space.fixedview(w_klass_or_tuple):
             if abstract_issubclass_w(space, w_derived, w_klass, allow_override):
                 return True
         return False
 
-    # -- case (type, type)
-    try:
-        if not allow_override:
-            return space.issubtype_w(w_derived, w_klass_or_tuple)
-        w_result = space.issubtype_allow_override(w_derived, w_klass_or_tuple)
-    except OperationError as e:   # if one of the args was not a type, ignore it
-        if not e.match(space, space.w_TypeError):
-            raise       # propagate other errors
-    else:
-        return space.is_true(w_result)
+    # -- case (anything, type)
+    if allow_override:
+        w_check = space.lookup(w_klass_or_tuple, "__subclasscheck__")

