[pypy-commit] pypy stmgc-c8: another merge

Raemi pypy.commits at gmail.com
Wed Feb 24 11:55:40 EST 2016


Author: Remi Meier <remi.meier at gmail.com>
Branch: stmgc-c8
Changeset: r82485:3edb1e823e07
Date: 2016-02-24 17:54 +0100
http://bitbucket.org/pypy/pypy/changeset/3edb1e823e07/

Log:	another merge

diff too long, truncating to 2000 out of 2914 lines

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -75,6 +75,7 @@
 ^lib_pypy/__pycache__$
 ^lib_pypy/ctypes_config_cache/_.+_cache\.py$
 ^lib_pypy/ctypes_config_cache/_.+_.+_\.py$
+^lib_pypy/_libmpdec/.+.o$
 ^rpython/translator/cli/query-descriptions$
 ^pypy/doc/discussion/.+\.html$
 ^include/.+\.h$
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -54,7 +54,8 @@
 It is quite common nowadays that xyz is available on PyPI_ and
 installable with ``pip install xyz``.  The simplest solution is to `use
 virtualenv (as documented here)`_.  Then enter (activate) the virtualenv
-and type: ``pip install xyz``.
+and type: ``pip install xyz``.  If you are unfamiliar with virtualenv or do
+not want to use it, you can also install ``pip`` globally via ``pypy -m ensurepip``.
 
 If you get errors from the C compiler, the module is a CPython C
 Extension module using unsupported features.  `See below.`_
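For reference, ``pypy -m ensurepip`` drives the standard ``ensurepip`` bootstrap;
assuming the stock ``ensurepip`` module ships with the build, the same step can
also be done programmatically:

    import ensurepip
    ensurepip.bootstrap(upgrade=True)   # install (or upgrade) pip for this interpreter
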
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -133,3 +133,9 @@
 `rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen
 for traces containing a large number of pure getfield operations.
 
+.. branch: exctrans
+
+Try to ensure that no new functions get annotated during the 'source_c' phase.
+Refactor sandboxing to operate at a higher level.
+
+.. branch: cpyext-bootstrap
diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py
--- a/pypy/module/_cffi_backend/embedding.py
+++ b/pypy/module/_cffi_backend/embedding.py
@@ -84,11 +84,68 @@
     return rffi.cast(rffi.INT, res)
 
 # ____________________________________________________________
+if os.name == 'nt':
+    do_startup = r'''
+#include <stdio.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+RPY_EXPORTED void rpython_startup_code(void);
+RPY_EXPORTED int pypy_setup_home(char *, int);
 
+static unsigned char _cffi_ready = 0;
+static const char *volatile _cffi_module_name;
 
-eci = ExternalCompilationInfo(separate_module_sources=[
-r"""
-/* XXX Windows missing */
+static void _cffi_init_error(const char *msg, const char *extra)
+{
+    fprintf(stderr,
+            "\nPyPy initialization failure when loading module '%s':\n%s%s\n",
+            _cffi_module_name, msg, extra);
+}
+
+BOOL CALLBACK _cffi_init(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *lpContex)
+{
+
+    HMODULE hModule;
+    TCHAR home[_MAX_PATH];
+    rpython_startup_code();
+    RPyGilAllocate();
+
+    GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | 
+                       GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+                       (LPCTSTR)&_cffi_init, &hModule);
+    if (hModule == 0 ) {
+        /* TODO turn the int into a string with FormatMessage */
+        
+        _cffi_init_error("dladdr() failed: ", "");
+        return TRUE;
+    }
+    GetModuleFileName(hModule, home, _MAX_PATH);
+    if (pypy_setup_home(home, 1) != 0) {
+        _cffi_init_error("pypy_setup_home() failed", "");
+        return TRUE;
+    }
+    _cffi_ready = 1;
+    fprintf(stderr, "startup succeeded, home %s\n", home);
+    return TRUE;
+}
+
+RPY_EXPORTED
+int pypy_carefully_make_gil(const char *name)
+{
+    /* For CFFI: this initializes the GIL and loads the home path.
+       It can be called completely concurrently from unrelated threads.
+       It assumes that we don't hold the GIL before (if it exists), and we
+       don't hold it afterwards.
+    */
+    static INIT_ONCE s_init_once;
+
+    _cffi_module_name = name;    /* not really thread-safe, but better than
+                                    nothing */
+    InitOnceExecuteOnce(&s_init_once, _cffi_init, NULL, NULL);
+    return (int)_cffi_ready - 1;
+}'''
+else:
+    do_startup = r"""
 #include <stdio.h>
 #include <dlfcn.h>
 #include <pthread.h>
@@ -141,6 +198,7 @@
     pthread_once(&once_control, _cffi_init);
     return (int)_cffi_ready - 1;
 }
-"""])
+"""
+eci = ExternalCompilationInfo(separate_module_sources=[do_startup])
 
 declare_c_function = rffi.llexternal_use_eci(eci)
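The Windows branch added above gets its run-exactly-once guarantee from
``InitOnceExecuteOnce``, mirroring what the POSIX branch gets from
``pthread_once``.  A minimal Python sketch of that pattern, with toy names
standing in for the real startup calls:

    import threading

    _init_lock = threading.Lock()
    _ready = False

    def _init(name):
        # stand-in for rpython_startup_code() + pypy_setup_home() above
        return True

    def carefully_make_gil(name):
        # safe to call concurrently from unrelated threads; _init runs at most once
        global _ready
        with _init_lock:
            if not _ready:
                _ready = _init(name)
        return 0 if _ready else -1
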
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -102,7 +102,7 @@
     fd = space.appexec([w_socket, space.wrap(orig_fd.fileno()),
             space.wrap(socket.AF_INET), space.wrap(socket.SOCK_STREAM),
             space.wrap(0)],
-           """(_socket, fd, family, type, proto): 
+           """(_socket, fd, family, type, proto):
                  return _socket.fromfd(fd, family, type, proto)""")
 
     assert space.unwrap(space.call_method(fd, 'fileno'))
@@ -326,7 +326,7 @@
 
     def test_ntoa_exception(self):
         import _socket
-        raises(_socket.error, _socket.inet_ntoa, "ab")
+        raises(_socket.error, _socket.inet_ntoa, b"ab")
 
     def test_aton_exceptions(self):
         import _socket
@@ -418,7 +418,7 @@
         # it if there is no connection.
         try:
             s.connect(("www.python.org", 80))
-        except _socket.gaierror, ex:
+        except _socket.gaierror as ex:
             skip("GAIError - probably no connection: %s" % str(ex.args))
         name = s.getpeername() # Will raise socket.error if not connected
         assert name[1] == 80
@@ -465,7 +465,7 @@
         sizes = {socket.htonl: 32, socket.ntohl: 32,
                  socket.htons: 16, socket.ntohs: 16}
         for func, size in sizes.items():
-            mask = (1L<<size) - 1
+            mask = (1 << size) - 1
             for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
                 assert i & mask == func(func(i&mask)) & mask
 
@@ -493,7 +493,7 @@
                  socket.htons: 16, socket.ntohs: 16}
         for func, size in sizes.items():
             try:
-                func(1L << size)
+                func(1 << size)
             except OverflowError:
                 pass
             else:
@@ -574,7 +574,7 @@
         # connection.
         try:
             s.connect(("www.python.org", 80))
-        except _socket.gaierror, ex:
+        except _socket.gaierror as ex:
             skip("GAIError - probably no connection: %s" % str(ex.args))
         exc = raises(TypeError, s.send, None)
         assert str(exc.value) == "must be string or buffer, not None"
@@ -608,12 +608,12 @@
             s, addr = serversock.accept()
             assert not addr
 
-            s.send('X')
+            s.send(b'X')
             data = clientsock.recv(100)
-            assert data == 'X'
-            clientsock.send('Y')
+            assert data == b'X'
+            clientsock.send(b'Y')
             data = s.recv(100)
-            assert data == 'Y'
+            assert data == b'Y'
 
             clientsock.close()
             s.close()
@@ -640,12 +640,12 @@
     def test_connect_to_kernel_netlink_routing_socket(self):
         import _socket, os
         s = _socket.socket(_socket.AF_NETLINK, _socket.SOCK_DGRAM, _socket.NETLINK_ROUTE)
-        assert s.getsockname() == (0L, 0L)
+        assert s.getsockname() == (0, 0)
         s.bind((0, 0))
         a, b = s.getsockname()
         assert a == os.getpid()
         assert b == 0
- 
+
 
 class AppTestPacket:
     def setup_class(cls):
@@ -711,20 +711,20 @@
         t, addr = self.serv.accept()
         cli.settimeout(1.0)
         # test recv() timeout
-        t.send('*')
+        t.send(b'*')
         buf = cli.recv(100)
-        assert buf == '*'
+        assert buf == b'*'
         raises(timeout, cli.recv, 100)
         # test that send() works
-        count = cli.send('!')
+        count = cli.send(b'!')
         assert count == 1
         buf = t.recv(1)
-        assert buf == '!'
+        assert buf == b'!'
         # test that sendall() works
-        count = cli.sendall('?')
+        count = cli.sendall(b'?')
         assert count is None
         buf = t.recv(1)
-        assert buf == '?'
+        assert buf == b'?'
         # speed up filling the buffers
         t.setsockopt(SOL_SOCKET, SO_RCVBUF, 4096)
         cli.setsockopt(SOL_SOCKET, SO_SNDBUF, 4096)
@@ -732,14 +732,14 @@
         count = 0
         try:
             while 1:
-                count += cli.send('foobar' * 70)
+                count += cli.send(b'foobar' * 70)
         except timeout:
             pass
         t.recv(count)
         # test sendall() timeout
         try:
             while 1:
-                cli.sendall('foobar' * 70)
+                cli.sendall(b'foobar' * 70)
         except timeout:
             pass
         # done
@@ -749,13 +749,13 @@
     def test_recv_into(self):
         import socket
         import array
-        MSG = 'dupa was here\n'
+        MSG = b'dupa was here\n'
         cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         cli.connect(self.serv.getsockname())
         conn, addr = self.serv.accept()
         buf = buffer(MSG)
         conn.send(buf)
-        buf = array.array('c', ' ' * 1024)
+        buf = array.array('b', b' ' * 1024)
         nbytes = cli.recv_into(buf)
         assert nbytes == len(MSG)
         msg = buf.tostring()[:len(MSG)]
@@ -771,13 +771,13 @@
     def test_recvfrom_into(self):
         import socket
         import array
-        MSG = 'dupa was here\n'
+        MSG = b'dupa was here\n'
         cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         cli.connect(self.serv.getsockname())
         conn, addr = self.serv.accept()
         buf = buffer(MSG)
         conn.send(buf)
-        buf = array.array('c', ' ' * 1024)
+        buf = array.array('b', b' ' * 1024)
         nbytes, addr = cli.recvfrom_into(buf)
         assert nbytes == len(MSG)
         msg = buf.tostring()[:len(MSG)]
diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py
--- a/pypy/module/_vmprof/__init__.py
+++ b/pypy/module/_vmprof/__init__.py
@@ -11,6 +11,7 @@
     interpleveldefs = {
         'enable': 'interp_vmprof.enable',
         'disable': 'interp_vmprof.disable',
+        'write_all_code_objects': 'interp_vmprof.write_all_code_objects',
         'VMProfError': 'space.fromcache(interp_vmprof.Cache).w_VMProfError',
     }
 
diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py
--- a/pypy/module/_vmprof/interp_vmprof.py
+++ b/pypy/module/_vmprof/interp_vmprof.py
@@ -59,11 +59,21 @@
     'interval' is a float representing the sampling interval, in seconds.
     Must be smaller than 1.0
     """
+    w_modules = space.sys.get('modules')
+    if space.is_true(space.contains(w_modules, space.wrap('_continuation'))):
+        space.warn(space.wrap("Using _continuation/greenlet/stacklet together "
+                              "with vmprof will crash"),
+                   space.w_RuntimeWarning)
     try:
         rvmprof.enable(fileno, period)
     except rvmprof.VMProfError, e:
         raise VMProfError(space, e)
 
+def write_all_code_objects(space):
+    """ Needed on cpython, just empty function here
+    """
+    pass
+
 def disable(space):
     """Disable vmprof.  Remember to close the file descriptor afterwards
     if necessary.
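For context, the new ``write_all_code_objects`` entry point is driven from app
level roughly as in this hedged sketch (``run_workload`` is a hypothetical
function; on PyPy the extra call is a no-op):

    import _vmprof

    f = open('profile.dat', 'w+b')
    _vmprof.enable(f.fileno(), 0.001)       # sampling period in seconds, must be < 1.0
    try:
        run_workload()                      # hypothetical code being profiled
    finally:
        _vmprof.disable()
        _vmprof.write_all_code_objects()    # needed on CPython, empty on PyPy
        f.close()
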
diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py
--- a/pypy/module/cpyext/__init__.py
+++ b/pypy/module/cpyext/__init__.py
@@ -36,7 +36,6 @@
 import pypy.module.cpyext.object
 import pypy.module.cpyext.stringobject
 import pypy.module.cpyext.tupleobject
-import pypy.module.cpyext.ndarrayobject
 import pypy.module.cpyext.setobject
 import pypy.module.cpyext.dictobject
 import pypy.module.cpyext.intobject
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -143,7 +143,7 @@
         target.chmod(0444) # make the file read-only, to make sure that nobody
                            # edits it by mistake
 
-def copy_header_files(dstdir):
+def copy_header_files(dstdir, copy_numpy_headers):
     # XXX: 20 lines of code to recursively copy a directory, really??
     assert dstdir.check(dir=True)
     headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl')
@@ -151,15 +151,16 @@
         headers.append(udir.join(name))
     _copy_header_files(headers, dstdir)
 
-    try:
-        dstdir.mkdir('numpy')
-    except py.error.EEXIST:
-        pass
-    numpy_dstdir = dstdir / 'numpy'
+    if copy_numpy_headers:
+        try:
+            dstdir.mkdir('numpy')
+        except py.error.EEXIST:
+            pass
+        numpy_dstdir = dstdir / 'numpy'
 
-    numpy_include_dir = include_dir / 'numpy'
-    numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl')
-    _copy_header_files(numpy_headers, numpy_dstdir)
+        numpy_include_dir = include_dir / 'numpy'
+        numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl')
+        _copy_header_files(numpy_headers, numpy_dstdir)
 
 
 class NotSpecified(object):
@@ -442,8 +443,8 @@
 TYPES = {}
 GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur
     '_Py_NoneStruct#': ('PyObject*', 'space.w_None'),
-    '_Py_TrueStruct#': ('PyObject*', 'space.w_True'),
-    '_Py_ZeroStruct#': ('PyObject*', 'space.w_False'),
+    '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'),
+    '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'),
     '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'),
     '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'),
     'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'),
@@ -482,7 +483,6 @@
         "PyComplex_Type": "space.w_complex",
         "PyByteArray_Type": "space.w_bytearray",
         "PyMemoryView_Type": "space.w_memoryview",
-        "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)",
         "PyBaseObject_Type": "space.w_object",
         'PyNone_Type': 'space.type(space.w_None)',
         'PyNotImplemented_Type': 'space.type(space.w_NotImplemented)',
@@ -506,7 +506,9 @@
 def get_structtype_for_ctype(ctype):
     from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr
     from pypy.module.cpyext.cdatetime import PyDateTime_CAPI
+    from pypy.module.cpyext.intobject import PyIntObject
     return {"PyObject*": PyObject, "PyTypeObject*": PyTypeObjectPtr,
+            "PyIntObject*": PyIntObject,
             "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype]
 
 PyTypeObject = lltype.ForwardReference()
@@ -773,6 +775,8 @@
     "NOT_RPYTHON"
     from pypy.module.cpyext.pyobject import make_ref
 
+    use_micronumpy = setup_micronumpy(space)
+
     export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS)
     from rpython.translator.c.database import LowLevelDatabase
     db = LowLevelDatabase()
@@ -830,6 +834,7 @@
     space.fromcache(State).install_dll(eci)
 
     # populate static data
+    builder = StaticObjectBuilder(space)
     for name, (typ, expr) in GLOBALS.iteritems():
         from pypy.module import cpyext
         w_obj = eval(expr)
@@ -854,7 +859,7 @@
                 assert False, "Unknown static pointer: %s %s" % (typ, name)
             ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value),
                                     ctypes.c_void_p).value
-        elif typ in ('PyObject*', 'PyTypeObject*'):
+        elif typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'):
             if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'):
                 # we already have the pointer
                 in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name)
@@ -863,17 +868,10 @@
                 # we have a structure, get its address
                 in_dll = ll2ctypes.get_ctypes_type(PyObject.TO).in_dll(bridge, name)
                 py_obj = ll2ctypes.ctypes2lltype(PyObject, ctypes.pointer(in_dll))
-            from pypy.module.cpyext.pyobject import (
-                track_reference, get_typedescr)
-            w_type = space.type(w_obj)
-            typedescr = get_typedescr(w_type.instancetypedef)
-            py_obj.c_ob_refcnt = 1
-            py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr,
-                                         make_ref(space, w_type))
-            typedescr.attach(space, py_obj, w_obj)
-            track_reference(space, py_obj, w_obj)
+            builder.prepare(py_obj, w_obj)
         else:
             assert False, "Unknown static object: %s %s" % (typ, name)
+    builder.attach_all()
 
     pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI')
 
@@ -890,6 +888,36 @@
     setup_init_functions(eci, translating=False)
     return modulename.new(ext='')
 
+
+class StaticObjectBuilder:
+    def __init__(self, space):
+        self.space = space
+        self.to_attach = []
+
+    def prepare(self, py_obj, w_obj):
+        from pypy.module.cpyext.pyobject import track_reference
+        py_obj.c_ob_refcnt = 1
+        track_reference(self.space, py_obj, w_obj)
+        self.to_attach.append((py_obj, w_obj))
+
+    def attach_all(self):
+        from pypy.module.cpyext.pyobject import get_typedescr, make_ref
+        from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2
+        space = self.space
+        space._cpyext_type_init = []
+        for py_obj, w_obj in self.to_attach:
+            w_type = space.type(w_obj)
+            typedescr = get_typedescr(w_type.instancetypedef)
+            py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr,
+                                         make_ref(space, w_type))
+            typedescr.attach(space, py_obj, w_obj)
+        cpyext_type_init = space._cpyext_type_init
+        del space._cpyext_type_init
+        for pto, w_type in cpyext_type_init:
+            finish_type_1(space, pto)
+            finish_type_2(space, pto, w_type)
+
+
 def mangle_name(prefix, name):
     if name.startswith('Py'):
         return prefix + name[2:]
@@ -985,6 +1013,24 @@
     pypy_decl_h.write('\n'.join(pypy_decls))
     return functions
 
+separate_module_files = [source_dir / "varargwrapper.c",
+                         source_dir / "pyerrors.c",
+                         source_dir / "modsupport.c",
+                         source_dir / "getargs.c",
+                         source_dir / "abstract.c",
+                         source_dir / "stringobject.c",
+                         source_dir / "mysnprintf.c",
+                         source_dir / "pythonrun.c",
+                         source_dir / "sysmodule.c",
+                         source_dir / "bufferobject.c",
+                         source_dir / "cobject.c",
+                         source_dir / "structseq.c",
+                         source_dir / "capsule.c",
+                         source_dir / "pysignals.c",
+                         source_dir / "pythread.c",
+                         source_dir / "missing.c",
+                         ]
+
 def build_eci(building_bridge, export_symbols, code):
     "NOT_RPYTHON"
     # Build code and get pointer to the structure
@@ -1038,24 +1084,7 @@
 
     eci = ExternalCompilationInfo(
         include_dirs=include_dirs,
-        separate_module_files=[source_dir / "varargwrapper.c",
-                               source_dir / "pyerrors.c",
-                               source_dir / "modsupport.c",
-                               source_dir / "getargs.c",
-                               source_dir / "abstract.c",
-                               source_dir / "stringobject.c",
-                               source_dir / "mysnprintf.c",
-                               source_dir / "pythonrun.c",
-                               source_dir / "sysmodule.c",
-                               source_dir / "bufferobject.c",
-                               source_dir / "cobject.c",
-                               source_dir / "structseq.c",
-                               source_dir / "capsule.c",
-                               source_dir / "pysignals.c",
-                               source_dir / "pythread.c",
-                               source_dir / "ndarrayobject.c",
-                               source_dir / "missing.c",
-                               ],
+        separate_module_files= separate_module_files,
         separate_module_sources=separate_module_sources,
         compile_extra=compile_extra,
         **kwds
@@ -1063,10 +1092,22 @@
 
     return eci
 
+def setup_micronumpy(space):
+    use_micronumpy = space.config.objspace.usemodules.micronumpy
+    if not use_micronumpy:
+        return use_micronumpy
+    # import to register api functions by side-effect
+    import pypy.module.cpyext.ndarrayobject 
+    global GLOBALS, SYMBOLS_C, separate_module_files
+    GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)")
+    SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']
+    separate_module_files.append(source_dir / "ndarrayobject.c")
+    return use_micronumpy
 
 def setup_library(space):
     "NOT_RPYTHON"
     from pypy.module.cpyext.pyobject import make_ref
+    use_micronumpy = setup_micronumpy(space)
 
     export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS)
     from rpython.translator.c.database import LowLevelDatabase
@@ -1084,14 +1125,33 @@
     run_bootstrap_functions(space)
     setup_va_functions(eci)
 
+    from pypy.module import cpyext   # for eval() below
+
+    # Set up the types.  Needs a special case, because of the
+    # immediate cycle involving 'c_ob_type', and because we don't
+    # want these types to be Py_TPFLAGS_HEAPTYPE.
+    static_types = {}
+    for name, (typ, expr) in GLOBALS.items():
+        if typ == 'PyTypeObject*':
+            pto = lltype.malloc(PyTypeObject, immortal=True,
+                                zero=True, flavor='raw')
+            pto.c_ob_refcnt = 1
+            pto.c_tp_basicsize = -1
+            static_types[name] = pto
+    builder = StaticObjectBuilder(space)
+    for name, pto in static_types.items():
+        pto.c_ob_type = static_types['PyType_Type#']
+        w_type = eval(GLOBALS[name][1])
+        builder.prepare(rffi.cast(PyObject, pto), w_type)
+    builder.attach_all()
+
     # populate static data
     for name, (typ, expr) in GLOBALS.iteritems():
         name = name.replace("#", "")
         if name.startswith('PyExc_'):
             name = '_' + name
-        from pypy.module import cpyext
         w_obj = eval(expr)
-        if typ in ('PyObject*', 'PyTypeObject*'):
+        if typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'):
             struct_ptr = make_ref(space, w_obj)
         elif typ == 'PyDateTime_CAPI*':
             continue
@@ -1108,7 +1168,7 @@
 
     setup_init_functions(eci, translating=True)
     trunk_include = pypydir.dirpath() / 'include'
-    copy_header_files(trunk_include)
+    copy_header_files(trunk_include, use_micronumpy)
 
 def _load_from_cffi(space, name, path, initptr):
     from pypy.module._cffi_backend import cffi1_module
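The ``StaticObjectBuilder`` added above splits static-object setup into a
prepare pass and an attach_all pass so that objects which reference each other
(the ``c_ob_type`` cycle mentioned in the comment) all exist before any
cross-link is filled in.  A stripped-down sketch of the same two-phase idea,
with toy names only:

    class TwoPhaseBuilder(object):
        def __init__(self):
            self.pending = []

        def prepare(self, obj, companion):
            # phase 1: just record the pair; touch nothing that may not exist yet
            self.pending.append((obj, companion))

        def attach_all(self):
            # phase 2: every object exists now, so cyclic links can be installed
            for obj, companion in self.pending:
                obj['points_to'] = companion

    builder = TwoPhaseBuilder()
    a, b = {}, {}
    builder.prepare(a, b)
    builder.prepare(b, a)       # a and b reference each other
    builder.attach_all()
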
diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py
--- a/pypy/module/cpyext/frameobject.py
+++ b/pypy/module/cpyext/frameobject.py
@@ -17,6 +17,7 @@
 PyFrameObjectFields = (PyObjectFields +
     (("f_code", PyCodeObject),
      ("f_globals", PyObject),
+     ("f_locals", PyObject),
      ("f_lineno", rffi.INT),
      ))
 cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct)
@@ -35,6 +36,7 @@
     py_frame = rffi.cast(PyFrameObject, py_obj)
     py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode))
     py_frame.c_f_globals = make_ref(space, frame.get_w_globals())
+    py_frame.c_f_locals = make_ref(space, frame.get_w_locals())
     rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno)
 
 @cpython_api([PyObject], lltype.Void, external=False)
@@ -43,6 +45,7 @@
     py_code = rffi.cast(PyObject, py_frame.c_f_code)
     Py_DecRef(space, py_code)
     Py_DecRef(space, py_frame.c_f_globals)
+    Py_DecRef(space, py_frame.c_f_locals)
     from pypy.module.cpyext.object import PyObject_dealloc
     PyObject_dealloc(space, py_obj)
 
@@ -72,6 +75,7 @@
     space.interp_w(PyCode, w_code) # sanity check
     py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code))
     py_frame.c_f_globals = make_ref(space, w_globals)
+    py_frame.c_f_locals = make_ref(space, w_locals)
     return py_frame
 
 @cpython_api([PyFrameObject], rffi.INT_real, error=-1)
diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h
--- a/pypy/module/cpyext/include/frameobject.h
+++ b/pypy/module/cpyext/include/frameobject.h
@@ -8,6 +8,7 @@
     PyObject_HEAD
     PyCodeObject *f_code;
     PyObject *f_globals;
+    PyObject *f_locals;
     int f_lineno;
 } PyFrameObject;
 
diff --git a/pypy/module/cpyext/include/stringobject.h b/pypy/module/cpyext/include/stringobject.h
--- a/pypy/module/cpyext/include/stringobject.h
+++ b/pypy/module/cpyext/include/stringobject.h
@@ -7,8 +7,8 @@
 extern "C" {
 #endif
 
-#define PyString_GET_SIZE(op) PyString_Size(op)
-#define PyString_AS_STRING(op) PyString_AsString(op)
+#define PyString_GET_SIZE(op) PyString_Size((PyObject*)(op))
+#define PyString_AS_STRING(op) PyString_AsString((PyObject*)(op))
 
 typedef struct {
     PyObject_HEAD
diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py
--- a/pypy/module/cpyext/test/test_api.py
+++ b/pypy/module/cpyext/test/test_api.py
@@ -98,7 +98,7 @@
 
 
 def test_copy_header_files(tmpdir):
-    api.copy_header_files(tmpdir)
+    api.copy_header_files(tmpdir, True)
     def check(name):
         f = tmpdir.join(name)
         assert f.check(file=True)
diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py
--- a/pypy/module/cpyext/test/test_frameobject.py
+++ b/pypy/module/cpyext/test/test_frameobject.py
@@ -9,6 +9,7 @@
                  PyObject *py_srcfile = PyString_FromString("filename");
                  PyObject *py_funcname = PyString_FromString("funcname");
                  PyObject *py_globals = PyDict_New();
+                 PyObject *py_locals = PyDict_New();
                  PyObject *empty_string = PyString_FromString("");
                  PyObject *empty_tuple = PyTuple_New(0);
                  PyCodeObject *py_code;
@@ -39,7 +40,7 @@
                      PyThreadState_Get(), /*PyThreadState *tstate,*/
                      py_code,             /*PyCodeObject *code,*/
                      py_globals,          /*PyObject *globals,*/
-                     0                    /*PyObject *locals*/
+                     py_locals            /*PyObject *locals*/
                  );
                  if (!py_frame) goto bad;
                  py_frame->f_lineno = 48; /* Does not work with CPython */
@@ -51,6 +52,7 @@
                  Py_XDECREF(empty_string);
                  Py_XDECREF(empty_tuple);
                  Py_XDECREF(py_globals);
+                 Py_XDECREF(py_locals);
                  Py_XDECREF(py_code);
                  Py_XDECREF(py_frame);
                  return NULL;
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -374,6 +374,11 @@
         module = self.import_extension('foo', [
             ("test_type", "METH_O",
              '''
+                 /* "args->ob_type" is a strange way to get at 'type',
+                    which should have a different tp_getattro/tp_setattro
+                    than its tp_base, which is 'object'.
+                  */
+                  
                  if (!args->ob_type->tp_setattro)
                  {
                      PyErr_SetString(PyExc_ValueError, "missing tp_setattro");
@@ -382,8 +387,12 @@
                  if (args->ob_type->tp_setattro ==
                      args->ob_type->tp_base->tp_setattro)
                  {
-                     PyErr_SetString(PyExc_ValueError, "recursive tp_setattro");
-                     return NULL;
+                     /* Note that unlike CPython, in PyPy 'type.tp_setattro'
+                        is the same function as 'object.tp_setattro'.  This
+                        test used to check that it was not, but that was an
+                        artifact of the bootstrap logic only---in the final
+                        C sources I checked and they are indeed the same.
+                        So we ignore this problem here. */
                  }
                  if (!args->ob_type->tp_getattro)
                  {
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -146,7 +146,7 @@
             assert len(slot_names) == 2
             struct = getattr(pto, slot_names[0])
             if not struct:
-                assert not space.config.translating
+                #assert not space.config.translating
                 assert not pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE
                 if slot_names[0] == 'c_tp_as_number':
                     STRUCT_TYPE = PyNumberMethods
@@ -310,55 +310,6 @@
                    realize=type_realize,
                    dealloc=type_dealloc)
 
-    # some types are difficult to create because of cycles.
-    # - object.ob_type = type
-    # - type.ob_type   = type
-    # - tuple.ob_type  = type
-    # - type.tp_base   = object
-    # - tuple.tp_base  = object
-    # - type.tp_bases is a tuple
-    # - object.tp_bases is a tuple
-    # - tuple.tp_bases is a tuple
-
-    # insert null placeholders to please create_ref()
-    track_reference(space, lltype.nullptr(PyObject.TO), space.w_type)
-    track_reference(space, lltype.nullptr(PyObject.TO), space.w_object)
-    track_reference(space, lltype.nullptr(PyObject.TO), space.w_tuple)
-    track_reference(space, lltype.nullptr(PyObject.TO), space.w_str)
-
-    # create the objects
-    py_type = create_ref(space, space.w_type)
-    py_object = create_ref(space, space.w_object)
-    py_tuple = create_ref(space, space.w_tuple)
-    py_str = create_ref(space, space.w_str)
-    # XXX py_str is not initialized here correctly, because we are
-    #     not tracking it, it gets an empty c_ob_type from py_basestring
-
-    # form cycles
-    pto_type = rffi.cast(PyTypeObjectPtr, py_type)
-    py_type.c_ob_type = pto_type
-    py_object.c_ob_type = pto_type
-    py_tuple.c_ob_type = pto_type
-
-    pto_object = rffi.cast(PyTypeObjectPtr, py_object)
-    pto_type.c_tp_base = pto_object
-    pto_tuple = rffi.cast(PyTypeObjectPtr, py_tuple)
-    pto_tuple.c_tp_base = pto_object
-
-    pto_type.c_tp_bases.c_ob_type = pto_tuple
-    pto_object.c_tp_bases.c_ob_type = pto_tuple
-    pto_tuple.c_tp_bases.c_ob_type = pto_tuple
-
-    for typ in (py_type, py_object, py_tuple, py_str):
-        heaptype = rffi.cast(PyHeapTypeObject, typ)
-        heaptype.c_ht_name.c_ob_type = pto_type
-
-    # Restore the mapping
-    track_reference(space, py_type, space.w_type, replace=True)
-    track_reference(space, py_object, space.w_object, replace=True)
-    track_reference(space, py_tuple, space.w_tuple, replace=True)
-    track_reference(space, py_str, space.w_str, replace=True)
-
 
 @cpython_api([PyObject], lltype.Void, external=False)
 def subtype_dealloc(space, obj):
@@ -476,6 +427,8 @@
     pto.c_tp_as_sequence = heaptype.c_as_sequence
     pto.c_tp_as_mapping = heaptype.c_as_mapping
     pto.c_tp_as_buffer = heaptype.c_as_buffer
+    pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out
+    pto.c_tp_itemsize = 0
 
     return rffi.cast(PyObject, heaptype)
 
@@ -511,8 +464,6 @@
         pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name)
     else:
         pto.c_tp_name = rffi.str2charp(w_type.name)
-    pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out
-    pto.c_tp_itemsize = 0
     # uninitialized fields:
     # c_tp_print, c_tp_getattr, c_tp_setattr
     # XXX implement
@@ -520,8 +471,11 @@
     w_base = best_base(space, w_type.bases_w)
     pto.c_tp_base = rffi.cast(PyTypeObjectPtr, make_ref(space, w_base))
 
-    finish_type_1(space, pto)
-    finish_type_2(space, pto, w_type)
+    if hasattr(space, '_cpyext_type_init'):
+        space._cpyext_type_init.append((pto, w_type))
+    else:
+        finish_type_1(space, pto)
+        finish_type_2(space, pto, w_type)
 
     pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct)
     if pto.c_tp_base:
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -116,7 +116,7 @@
 
     def _find_map_attr(self, name, index):
         while isinstance(self, PlainAttribute):
-            if name == self.name and index == self.index:
+            if index == self.index and name == self.name:
                 return self
             self = self.back
         return None
@@ -168,7 +168,6 @@
             jit.isconstant(name) and
             jit.isconstant(index))
     def add_attr(self, obj, name, index, w_value):
-        # grumble, jit needs this
         attr = self._get_new_attr(name, index)
         oldattr = obj._get_mapdict_map()
         if not jit.we_are_jitted():
@@ -305,7 +304,7 @@
         new_obj._get_mapdict_map().add_attr(new_obj, self.name, self.index, w_value)
 
     def delete(self, obj, name, index):
-        if name == self.name and index == self.index:
+        if index == self.index and name == self.name:
             # ok, attribute is deleted
             if not self.ever_mutated:
                 self.ever_mutated = True
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py
--- a/pypy/objspace/std/setobject.py
+++ b/pypy/objspace/std/setobject.py
@@ -942,7 +942,7 @@
             return False
         if w_set.length() == 0:
             return True
-        # it's possible to have 0-lenght strategy that's not empty
+        # it's possible to have 0-length strategy that's not empty
         if w_set.strategy is w_other.strategy:
             return self._issubset_unwrapped(w_set, w_other)
         if not self.may_contain_equal_elements(w_other.strategy):
diff --git a/pypy/test_all.py b/pypy/test_all.py
--- a/pypy/test_all.py
+++ b/pypy/test_all.py
@@ -26,11 +26,10 @@
     #Add toplevel repository dir to sys.path
     sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
     import pytest
-    import pytest_cov
     if sys.platform == 'win32':
         #Try to avoid opeing a dialog box if one of the tests causes a system error
         # We do this in runner.py, but buildbots run twisted which ruins inheritance
-        # in windows subprocesses. 
+        # in windows subprocesses.
         import ctypes
         winapi = ctypes.windll.kernel32
         SetErrorMode = winapi.SetErrorMode
@@ -44,4 +43,4 @@
         old_mode = SetErrorMode(flags)
         SetErrorMode(old_mode | flags)
 
-    sys.exit(pytest.main(plugins=[pytest_cov]))
+    sys.exit(pytest.main())
diff --git a/pytest_cov.py b/pytest_cov.py
deleted file mode 100644
--- a/pytest_cov.py
+++ /dev/null
@@ -1,353 +0,0 @@
-"""produce code coverage reports using the 'coverage' package, including support for distributed testing.
-
-This plugin produces coverage reports.  It supports centralised testing and distributed testing in
-both load and each modes.  It also supports coverage of subprocesses.
-
-All features offered by the coverage package should be available, either through pytest-cov or
-through coverage's config file.
-
-
-Installation
-------------
-
-The `pytest-cov`_ package may be installed with pip or easy_install::
-
-    pip install pytest-cov
-    easy_install pytest-cov
-
-.. _`pytest-cov`: http://pypi.python.org/pypi/pytest-cov/
-
-
-Uninstallation
---------------
-
-Uninstalling packages is supported by pip::
-
-    pip uninstall pytest-cov
-
-However easy_install does not provide an uninstall facility.
-
-.. IMPORTANT::
-
-    Ensure that you manually delete the init_cov_core.pth file in your site-packages directory.
-
-    This file starts coverage collection of subprocesses if appropriate during site initialisation
-    at python startup.
-
-
-Usage
------
-
-Centralised Testing
-~~~~~~~~~~~~~~~~~~~
-
-Centralised testing will report on the combined coverage of the main process and all of it's
-subprocesses.
-
-Running centralised testing::
-
-    py.test --cov myproj tests/
-
-Shows a terminal report::
-
-    -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
-    Name                 Stmts   Miss  Cover
-    ----------------------------------------
-    myproj/__init__          2      0   100%
-    myproj/myproj          257     13    94%
-    myproj/feature4286      94      7    92%
-    ----------------------------------------
-    TOTAL                  353     20    94%
-
-
-Distributed Testing: Load
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Distributed testing with dist mode set to load will report on the combined coverage of all slaves.
-The slaves may be spread out over any number of hosts and each slave may be located anywhere on the
-file system.  Each slave will have it's subprocesses measured.
-
-Running distributed testing with dist mode set to load::
-
-    py.test --cov myproj -n 2 tests/
-
-Shows a terminal report::
-
-    -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
-    Name                 Stmts   Miss  Cover
-    ----------------------------------------
-    myproj/__init__          2      0   100%
-    myproj/myproj          257     13    94%
-    myproj/feature4286      94      7    92%
-    ----------------------------------------
-    TOTAL                  353     20    94%
-
-
-Again but spread over different hosts and different directories::
-
-    py.test --cov myproj --dist load
-            --tx ssh=memedough at host1//chdir=testenv1
-            --tx ssh=memedough at host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python
-            --rsyncdir myproj --rsyncdir tests --rsync examples
-            tests/
-
-Shows a terminal report::
-
-    -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
-    Name                 Stmts   Miss  Cover
-    ----------------------------------------
-    myproj/__init__          2      0   100%
-    myproj/myproj          257     13    94%
-    myproj/feature4286      94      7    92%
-    ----------------------------------------
-    TOTAL                  353     20    94%
-
-
-Distributed Testing: Each
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Distributed testing with dist mode set to each will report on the combined coverage of all slaves.
-Since each slave is running all tests this allows generating a combined coverage report for multiple
-environments.
-
-Running distributed testing with dist mode set to each::
-
-    py.test --cov myproj --dist each
-            --tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python
-            --tx ssh=memedough at host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python
-            --rsyncdir myproj --rsyncdir tests --rsync examples
-            tests/
-
-Shows a terminal report::
-
-    ---------------------------------------- coverage ----------------------------------------
-                              platform linux2, python 2.6.5-final-0
-                              platform linux2, python 2.7.0-final-0
-    Name                 Stmts   Miss  Cover
-    ----------------------------------------
-    myproj/__init__          2      0   100%
-    myproj/myproj          257     13    94%
-    myproj/feature4286      94      7    92%
-    ----------------------------------------
-    TOTAL                  353     20    94%
-
-
-Reporting
----------
-
-It is possible to generate any combination of the reports for a single test run.
-
-The available reports are terminal (with or without missing line numbers shown), HTML, XML and
-annotated source code.
-
-The terminal report without line numbers (default)::
-
-    py.test --cov-report term --cov myproj tests/
-
-    -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
-    Name                 Stmts   Miss  Cover
-    ----------------------------------------
-    myproj/__init__          2      0   100%
-    myproj/myproj          257     13    94%
-    myproj/feature4286      94      7    92%
-    ----------------------------------------
-    TOTAL                  353     20    94%
-
-
-The terminal report with line numbers::
-
-    py.test --cov-report term-missing --cov myproj tests/
-
-    -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
-    Name                 Stmts   Miss  Cover   Missing
-    --------------------------------------------------
-    myproj/__init__          2      0   100%
-    myproj/myproj          257     13    94%   24-26, 99, 149, 233-236, 297-298, 369-370
-    myproj/feature4286      94      7    92%   183-188, 197
-    --------------------------------------------------
-    TOTAL                  353     20    94%
-
-
-The remaining three reports output to files without showing anything on the terminal (useful for
-when the output is going to a continuous integration server)::
-
-    py.test --cov-report html
-            --cov-report xml
-            --cov-report annotate
-            --cov myproj tests/
-
-
-Coverage Data File
-------------------
-
-The data file is erased at the beginning of testing to ensure clean data for each test run.
-
-The data file is left at the end of testing so that it is possible to use normal coverage tools to
-examine it.
-
-
-Coverage Config File
---------------------
-
-This plugin provides a clean minimal set of command line options that are added to pytest.  For
-further control of coverage use a coverage config file.
-
-For example if tests are contained within the directory tree being measured the tests may be
-excluded if desired by using a .coveragerc file with the omit option set::
-
-    py.test --cov-config .coveragerc
-            --cov myproj
-            myproj/tests/
-
-Where the .coveragerc file contains file globs::
-
-    [run]
-    omit = tests/*
-
-For full details refer to the `coverage config file`_ documentation.
-
-.. _`coverage config file`: http://nedbatchelder.com/code/coverage/config.html
-
-Note that this plugin controls some options and setting the option in the config file will have no
-effect.  These include specifying source to be measured (source option) and all data file handling
-(data_file and parallel options).
-
-
-Limitations
------------
-
-For distributed testing the slaves must have the pytest-cov package installed.  This is needed since
-the plugin must be registered through setuptools / distribute for pytest to start the plugin on the
-slave.
-
-For subprocess measurement environment variables must make it from the main process to the
-subprocess.  The python used by the subprocess must have pytest-cov installed.  The subprocess must
-do normal site initialisation so that the environment variables can be detected and coverage
-started.
-
-
-Acknowledgements
-----------------
-
-Whilst this plugin has been built fresh from the ground up it has been influenced by the work done
-on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and nose-cover (Jason Pellerin) which are
-other coverage plugins.
-
-Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs.
-
-Holger Krekel for pytest with its distributed testing support.
-
-Jason Pellerin for nose.
-
-Michael Foord for unittest2.
-
-No doubt others have contributed to these tools as well.
-"""
-
-
-def pytest_addoption(parser):
-    """Add options to control coverage."""
-
-    group = parser.getgroup('coverage reporting with distributed testing support')
-    group.addoption('--cov', action='append', default=[], metavar='path',
-                    dest='cov_source',
-                    help='measure coverage for filesystem path (multi-allowed)')
-    group.addoption('--cov-report', action='append', default=[], metavar='type',
-                    choices=['term', 'term-missing', 'annotate', 'html', 'xml'],
-                    dest='cov_report',
-                    help='type of report to generate: term, term-missing, annotate, html, xml (multi-allowed)')
-    group.addoption('--cov-config', action='store', default='.coveragerc', metavar='path',
-                    dest='cov_config',
-                    help='config file for coverage, default: .coveragerc')
-
-
-def pytest_configure(config):
-    """Activate coverage plugin if appropriate."""
-
-    if config.getvalue('cov_source'):
-        config.pluginmanager.register(CovPlugin(), '_cov')
-
-
-class CovPlugin(object):
-    """Use coverage package to produce code coverage reports.
-
-    Delegates all work to a particular implementation based on whether
-    this test process is centralised, a distributed master or a
-    distributed slave.
-    """
-
-    def __init__(self):
-        """Creates a coverage pytest plugin.
-
-        We read the rc file that coverage uses to get the data file
-        name.  This is needed since we give coverage through it's API
-        the data file name.
-        """
-
-        # Our implementation is unknown at this time.
-        self.cov_controller = None
-
-    def pytest_sessionstart(self, session):
-        """At session start determine our implementation and delegate to it."""
-
-        import cov_core
-
-        cov_source = session.config.getvalue('cov_source')
-        cov_report = session.config.getvalue('cov_report') or ['term']
-        cov_config = session.config.getvalue('cov_config')
-
-        session_name = session.__class__.__name__
-        is_master = (session.config.pluginmanager.hasplugin('dsession') or
-                     session_name == 'DSession')
-        is_slave = (hasattr(session.config, 'slaveinput') or
-                    session_name == 'SlaveSession')
-        nodeid = None
-
-        if is_master:
-            controller_cls = cov_core.DistMaster
-        elif is_slave:
-            controller_cls = cov_core.DistSlave
-            nodeid = session.config.slaveinput.get('slaveid', getattr(session, 'nodeid'))
-        else:
-            controller_cls = cov_core.Central
-
-        self.cov_controller = controller_cls(cov_source,
-                                             cov_report,
-                                             cov_config,
-                                             session.config,
-                                             nodeid)
-
-        self.cov_controller.start()
-
-    def pytest_configure_node(self, node):
-        """Delegate to our implementation."""
-
-        self.cov_controller.configure_node(node)
-    pytest_configure_node.optionalhook = True
-
-    def pytest_testnodedown(self, node, error):
-        """Delegate to our implementation."""
-
-        self.cov_controller.testnodedown(node, error)
-    pytest_testnodedown.optionalhook = True
-
-    def pytest_sessionfinish(self, session, exitstatus):
-        """Delegate to our implementation."""
-
-        self.cov_controller.finish()
-
-    def pytest_terminal_summary(self, terminalreporter):
-        """Delegate to our implementation."""
-
-        self.cov_controller.summary(terminalreporter._tw)
-
-
-def pytest_funcarg__cov(request):
-    """A pytest funcarg that provides access to the underlying coverage object."""
-
-    # Check with hasplugin to avoid getplugin exception in older pytest.
-    if request.config.pluginmanager.hasplugin('_cov'):
-        plugin = request.config.pluginmanager.getplugin('_cov')
-        if plugin.cov_controller:
-            return plugin.cov_controller.cov
-    return None
diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py
--- a/rpython/annotator/builtin.py
+++ b/rpython/annotator/builtin.py
@@ -39,8 +39,9 @@
         return s_result
     s_realresult = immutablevalue(realresult)
     if not s_result.contains(s_realresult):
-        raise Exception("%s%r returned %r, which is not contained in %s" % (
-            func, args, realresult, s_result))
+        raise AnnotatorError(
+            "%s%r returned %r, which is not contained in %s" % (
+                func, args, realresult, s_result))
     return s_realresult
 
 # ____________________________________________________________
@@ -56,14 +57,14 @@
         s_start, s_stop = args[:2]
         s_step = args[2]
     else:
-        raise Exception("range() takes 1 to 3 arguments")
+        raise AnnotatorError("range() takes 1 to 3 arguments")
     empty = False  # so far
     if not s_step.is_constant():
         step = 0 # this case signals a variable step
     else:
         step = s_step.const
         if step == 0:
-            raise Exception("range() with step zero")
+            raise AnnotatorError("range() with step zero")
         if s_start.is_constant() and s_stop.is_constant():
             try:
                 if len(xrange(s_start.const, s_stop.const, step)) == 0:
@@ -285,7 +286,8 @@
 else:
     @analyzer_for(unicodedata.decimal)
     def unicodedata_decimal(s_uchr):
-        raise TypeError("unicodedate.decimal() calls should not happen at interp-level")
+        raise AnnotatorError(
+            "unicodedate.decimal() calls should not happen at interp-level")
 
 @analyzer_for(OrderedDict)
 def analyze():
@@ -299,9 +301,9 @@
 @analyzer_for(weakref.ref)
 def weakref_ref(s_obj):
     if not isinstance(s_obj, SomeInstance):
-        raise Exception("cannot take a weakref to %r" % (s_obj,))
+        raise AnnotatorError("cannot take a weakref to %r" % (s_obj,))
     if s_obj.can_be_None:
-        raise Exception("should assert that the instance we take "
+        raise AnnotatorError("should assert that the instance we take "
                         "a weakref to cannot be None")
     return SomeWeakRef(s_obj.classdef)
 
@@ -311,3 +313,14 @@
 @analyzer_for(rpython.rlib.objectmodel.free_non_gc_object)
 def robjmodel_free_non_gc_object(obj):
     pass
+
+#________________________________
+# pdb
+
+import pdb
+
+ at analyzer_for(pdb.set_trace)
+def pdb_set_trace(*args_s):
+    raise AnnotatorError(
+        "you left pdb.set_trace() in your interpreter! "
+        "If you want to attach a gdb instead, call rlib.debug.attach_gdb()")
diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py
@@ -0,0 +1,86 @@
+
+import os, py
+from rpython.jit.backend.test.support import CCompiledMixin
+from rpython.rlib.jit import JitDriver
+from rpython.tool.udir import udir
+from rpython.translator.translator import TranslationContext
+from rpython.jit.backend.detect_cpu import getcpuclass
+
+class CompiledVmprofTest(CCompiledMixin):
+    CPUClass = getcpuclass()
+
+    def _get_TranslationContext(self):
+        t = TranslationContext()
+        t.config.translation.gc = 'incminimark'
+        t.config.translation.list_comprehension_operations = True
+        return t
+
+    def test_vmprof(self):
+        from rpython.rlib import rvmprof
+
+        class MyCode:
+            _vmprof_unique_id = 0
+            def __init__(self, name):
+                self.name = name
+
+        def get_name(code):
+            return code.name
+
+        code2 = MyCode("py:y:foo:4")
+        rvmprof.register_code(code2, get_name)
+
+        try:
+            rvmprof.register_code_object_class(MyCode, get_name)
+        except rvmprof.VMProfPlatformUnsupported, e:
+            py.test.skip(str(e))
+
+        def get_unique_id(code):
+            return rvmprof.get_unique_id(code)
+
+        driver = JitDriver(greens = ['code'], reds = ['i', 's', 'num'],
+            is_recursive=True, get_unique_id=get_unique_id)
+
+        @rvmprof.vmprof_execute_code("xcode13", lambda code, num: code)
+        def main(code, num):
+            return main_jitted(code, num)
+
+        def main_jitted(code, num):
+            s = 0
+            i = 0
+            while i < num:
+                driver.jit_merge_point(code=code, i=i, s=s, num=num)
+                s += (i << 1)
+                if i % 3 == 0 and code is not code2:
+                    main(code2, 100)
+                i += 1
+            return s
+
+        tmpfilename = str(udir.join('test_rvmprof'))
+
+        def f(num):
+            code = MyCode("py:x:foo:3")
+            rvmprof.register_code(code, get_name)
+            fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666)
+            period = 0.0001
+            rvmprof.enable(fd, period)
+            res = main(code, num)
+            #assert res == 499999500000
+            rvmprof.disable()
+            os.close(fd)
+            return 0
+        
+        def check_vmprof_output():
+            from vmprof import read_profile
+            tmpfile = str(udir.join('test_rvmprof'))
+            stats = read_profile(tmpfile)
+            t = stats.get_tree()
+            assert t.name == 'py:x:foo:3'
+            assert len(t.children) == 1 # jit
+
+        self.meta_interp(f, [1000000], inline=True)
+        try:
+            import vmprof
+        except ImportError:
+            pass
+        else:
+            check_vmprof_output()
\ No newline at end of file
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -0,0 +1,49 @@
+import py
+from rpython.rlib import jit
+from rpython.rtyper.annlowlevel import llhelper
+from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rlib.rvmprof import cintf
+from rpython.jit.backend.x86.arch import WORD
+from rpython.jit.codewriter.policy import JitPolicy
+
+class BaseRVMProfTest(object):
+    def test_one(self):
+        py.test.skip("needs thread-locals in the JIT, which is only available "
+                     "after translation")
+        visited = []
+
+        def helper():
+            stack = cintf.vmprof_tl_stack.getraw()
+            if stack:
+                # not during tracing
+                visited.append(stack.c_value)
+            else:
+                visited.append(0)
+
+        llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
+
+        driver = jit.JitDriver(greens=[], reds='auto')
+
+        def f(n):
+            i = 0
+            while i < n:
+                driver.jit_merge_point()
+                i += 1
+                llfn()
+
+        class Hooks(jit.JitHookInterface):
+            def after_compile(self, debug_info):
+                self.raw_start = debug_info.asminfo.rawstart
+
+        hooks = Hooks()
+
+        null = lltype.nullptr(cintf.VMPROFSTACK)
+        cintf.vmprof_tl_stack.setraw(null)   # make it empty
+        self.meta_interp(f, [10], policy=JitPolicy(hooks))
+        v = set(visited)
+        assert 0 in v
+        v.remove(0)
+        assert len(v) == 1
+        assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024
+        assert cintf.vmprof_tl_stack.getraw() == null
+        # ^^^ make sure we didn't leave anything dangling
diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py
--- a/rpython/jit/backend/x86/arch.py
+++ b/rpython/jit/backend/x86/arch.py
@@ -34,7 +34,7 @@
 
 if WORD == 4:
     # ebp + ebx + esi + edi + 15 extra words = 19 words
-    FRAME_FIXED_SIZE = 19
+    FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof, XXX make more compact!
     PASS_ON_MY_FRAME = 15
     JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float
     # 'threadlocal_addr' is passed as 2nd argument on the stack,
@@ -44,7 +44,7 @@
     THREADLOCAL_OFS = (FRAME_FIXED_SIZE + 2) * WORD
 else:
     # rbp + rbx + r12 + r13 + r14 + r15 + threadlocal + 12 extra words = 19
-    FRAME_FIXED_SIZE = 19
+    FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof, XXX make more compact!
     PASS_ON_MY_FRAME = 12
     JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM
     # 'threadlocal_addr' is passed as 2nd argument in %esi,
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -11,7 +11,7 @@
 from rpython.jit.metainterp.compile import ResumeGuardDescr
 from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory
 from rpython.rtyper.lltypesystem.lloperation import llop
-from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref
+from rpython.rtyper.annlowlevel import cast_instance_to_gcref
 from rpython.rtyper import rclass
 from rpython.rlib.jit import AsmInfo
 from rpython.jit.backend.model import CompiledLoopToken
@@ -999,11 +999,56 @@
         else:
             return FRAME_FIXED_SIZE
 
+    def _call_header_vmprof(self):
+        from rpython.rlib.rvmprof.rvmprof import cintf, VMPROF_JITTED_TAG
+
+        # tloc = address of pypy_threadlocal_s
+        if IS_X86_32:
+            # Can't use esi here: its old value is not saved yet.
+            # But eax, ecx and edx are still free at this point.
+            self.mc.MOV_rs(edx.value, THREADLOCAL_OFS)
+            tloc = edx
+            old = ecx
+        else:
+            # The thread-local value is already in esi.
+            # If possible, avoid using ecx or edx: they would be needed
+            # to pass arguments #3 and #4 (even though, so far, the
+            # assembler only receives two arguments).
+            tloc = esi
+            old = r11
+        # eax = address in the stack of a 3-word struct vmprof_stack_s
+        self.mc.LEA_rs(eax.value, (FRAME_FIXED_SIZE - 4) * WORD)
+        # old = current value of vmprof_tl_stack
+        offset = cintf.vmprof_tl_stack.getoffset()
+        self.mc.MOV_rm(old.value, (tloc.value, offset))
+        # eax->next = old
+        self.mc.MOV_mr((eax.value, 0), old.value)
+        # eax->value = my esp
+        self.mc.MOV_mr((eax.value, WORD), esp.value)
+        # eax->kind = VMPROF_JITTED_TAG
+        self.mc.MOV_mi((eax.value, WORD * 2), VMPROF_JITTED_TAG)
+        # store the new top (eax) into vmprof_tl_stack
+        self.mc.MOV_mr((tloc.value, offset), eax.value)
+
+    def _call_footer_vmprof(self):
+        from rpython.rlib.rvmprof.rvmprof import cintf
+        # edx = address of pypy_threadlocal_s
+        self.mc.MOV_rs(edx.value, THREADLOCAL_OFS)
+        self.mc.AND_ri(edx.value, ~1)
+        # eax = (our local vmprof_tl_stack).next
+        self.mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE - 4 + 0) * WORD)
+        # store eax back into vmprof_tl_stack (popping our entry)
+        offset = cintf.vmprof_tl_stack.getoffset()
+        self.mc.MOV_mr((edx.value, offset), eax.value)
+
     def _call_header(self):
         self.mc.SUB_ri(esp.value, self._get_whole_frame_size() * WORD)
         self.mc.MOV_sr(PASS_ON_MY_FRAME * WORD, ebp.value)
         if IS_X86_64:
             self.mc.MOV_sr(THREADLOCAL_OFS, esi.value)
+        if not self.cpu.gc_ll_descr.stm and self.cpu.translate_support_code:
+            self._call_header_vmprof()     # on X86_64, this uses esi
+        if IS_X86_64:
             self.mc.MOV_rr(ebp.value, edi.value)
         else:
             self.mc.MOV_rs(ebp.value, (self._get_whole_frame_size() + 1) * WORD)
@@ -1041,6 +1086,8 @@
             self._call_footer_shadowstack()
 
         # the return value is the jitframe
+        if not self.cpu.gc_ll_descr.stm and self.cpu.translate_support_code:
+            self._call_footer_vmprof()
         self.mc.MOV_rr(eax.value, ebp.value)
 
         for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1):
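
The machine code emitted by _call_header_vmprof()/_call_footer_vmprof() above
amounts to a push/pop on the thread-local vmprof stack; expressed with the
structures from rvmprof, a rough Python sketch (not part of the patch;
'frame_struct' and 'esp_value' are hypothetical stand-ins for the in-frame
struct and the saved stack pointer):

    from rpython.rlib.rvmprof.cintf import vmprof_tl_stack
    from rpython.rlib.rvmprof.rvmprof import VMPROF_JITTED_TAG

    def call_header_vmprof(frame_struct, esp_value):
        frame_struct.c_next = vmprof_tl_stack.getraw()   # link to the old top
        frame_struct.c_value = esp_value                 # record where this frame lives
        frame_struct.c_kind = VMPROF_JITTED_TAG          # mark it as JIT-compiled code
        vmprof_tl_stack.setraw(frame_struct)             # make it the new top

    def call_footer_vmprof(frame_struct):
        vmprof_tl_stack.setraw(frame_struct.c_next)      # pop our entry on exit
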
diff --git a/rpython/jit/backend/x86/test/test_rvmprof.py b/rpython/jit/backend/x86/test/test_rvmprof.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/x86/test/test_rvmprof.py
@@ -0,0 +1,7 @@
+
+import py
+from rpython.jit.backend.test.test_rvmprof import BaseRVMProfTest
+from rpython.jit.backend.x86.test.test_basic import Jit386Mixin
+
+class TestFfiCall(Jit386Mixin, BaseRVMProfTest):
+    pass
\ No newline at end of file
diff --git a/rpython/jit/backend/x86/test/test_zrpy_vmprof.py b/rpython/jit/backend/x86/test/test_zrpy_vmprof.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/x86/test/test_zrpy_vmprof.py
@@ -0,0 +1,7 @@
+
+from rpython.jit.backend.llsupport.test.zrpy_vmprof_test import CompiledVmprofTest
+
+class TestZVMprof(CompiledVmprofTest):
+    
+    gcrootfinder = "shadowstack"
+    gc = "incminimark"
\ No newline at end of file
diff --git a/rpython/jit/backend/x86/test/test_zvmprof.py b/rpython/jit/backend/x86/test/test_zvmprof.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/x86/test/test_zvmprof.py
@@ -0,0 +1,7 @@
+
+from rpython.jit.backend.llsupport.test.zrpy_vmprof_test import CompiledVmprofTest
+
+class TestZVMprof(CompiledVmprofTest):
+    
+    gcrootfinder = "shadowstack"
+    gc = "incminimark"
\ No newline at end of file
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -1344,7 +1344,7 @@
     tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_',
                                loop_invariant=loop_inv)
     OS_THREADLOCALREF_GET = effectinfo.EffectInfo.OS_THREADLOCALREF_GET
-    c = const(tlfield.offset)
+    c = const(tlfield.getoffset())
     v = varoftype(lltype.Signed)
     op = SpaceOperation('threadlocalref_get', [c], v)
     cc = FakeBuiltinCallControl()
diff --git a/rpython/jit/metainterp/quasiimmut.py b/rpython/jit/metainterp/quasiimmut.py
--- a/rpython/jit/metainterp/quasiimmut.py
+++ b/rpython/jit/metainterp/quasiimmut.py
@@ -51,6 +51,7 @@
 class QuasiImmut(object):
     llopaque = True
     compress_limit = 30
+    looptokens_wrefs = None
 
     def __init__(self, cpu):
         self.cpu = cpu
@@ -75,7 +76,7 @@
     def compress_looptokens_list(self):
         self.looptokens_wrefs = [wref for wref in self.looptokens_wrefs
                                       if wref() is not None]
-        # NB. we must keep around the looptoken_wrefs that are
+        # NB. we must keep around the looptokens_wrefs that are
         # already invalidated; see below
         self.compress_limit = (len(self.looptokens_wrefs) + 15) * 2
 
@@ -83,6 +84,9 @@
         # When this is called, all the loops that we record become
         # invalid: all GUARD_NOT_INVALIDATED in these loops (and
         # in attached bridges) must now fail.
+        if self.looptokens_wrefs is None:
+            # should never happen at runtime, but helps compiled tests
+            return
         wrefs = self.looptokens_wrefs
         self.looptokens_wrefs = []
         for wref in wrefs:
diff --git a/rpython/jit/metainterp/test/test_jitdriver.py b/rpython/jit/metainterp/test/test_jitdriver.py
--- a/rpython/jit/metainterp/test/test_jitdriver.py
+++ b/rpython/jit/metainterp/test/test_jitdriver.py
@@ -193,7 +193,7 @@
             return pc + 1
         
         driver = JitDriver(greens=["pc"], reds='auto',
-                           get_unique_id=get_unique_id)
+                           get_unique_id=get_unique_id, is_recursive=True)
 
         def f(arg):
             i = 0
diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py
--- a/rpython/jit/metainterp/test/test_recursive.py
+++ b/rpython/jit/metainterp/test/test_recursive.py
@@ -1312,7 +1312,7 @@
                 return (code + 1) * 2
 
             driver = JitDriver(greens=["pc", "code"], reds='auto',
-                               get_unique_id=get_unique_id)
+                               get_unique_id=get_unique_id, is_recursive=True)
 
             def f(pc, code):
                 i = 0
diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
--- a/rpython/rlib/jit.py
+++ b/rpython/rlib/jit.py
@@ -624,6 +624,8 @@
             raise AttributeError("no 'greens' or 'reds' supplied")
         if virtualizables is not None:
             self.virtualizables = virtualizables
+        if get_unique_id is not None:
+            assert is_recursive, "get_unique_id and is_recursive must be specified at the same time"
         for v in self.virtualizables:
             assert v in self.reds
         # if reds are automatic, they won't be passed to jit_merge_point, so
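
A minimal sketch of the new constraint: a JitDriver that passes get_unique_id
must now also say is_recursive=True, mirroring the test updates above
(illustrative only, not part of the patch):

    from rpython.rlib.jit import JitDriver

    def get_unique_id(pc):
        return pc + 1      # any deterministic id derived from the green args

    driver = JitDriver(greens=['pc'], reds='auto',
                       get_unique_id=get_unique_id, is_recursive=True)
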
diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py
--- a/rpython/rlib/rthread.py
+++ b/rpython/rlib/rthread.py
@@ -320,7 +320,7 @@
         offset = CDefinedIntSymbolic('RPY_TLOFS_%s' % self.fieldname,
                                      default='?')
         offset.loop_invariant = loop_invariant
-        self.offset = offset
+        self._offset = offset
 
         # for STM only
         PSTRUCTTYPE = _field2structptr(FIELDTYPE)
@@ -387,7 +387,7 @@
         ThreadLocalField.__init__(self, lltype.Signed, 'tlref%d' % unique_id,
                                   loop_invariant=loop_invariant)
         setraw = self.setraw
-        offset = self.offset
+        offset = self._offset
 
         def get():
             if we_are_translated():
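
With the attribute renamed to _offset, callers are expected to go through
getoffset() instead (see test_jtransform.py and the x86 assembler above).  The
accessor itself is outside this hunk; assuming it simply exposes the renamed
attribute, a minimal hypothetical sketch:

    class _ThreadLocalFieldSketch(object):
        def __init__(self, offset):
            self._offset = offset      # private after this change
        def getoffset(self):
            return self._offset        # what call sites now use

    # e.g. in test_jtransform.py above: c = const(tlfield.getoffset())
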
diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
--- a/rpython/rlib/rvmprof/cintf.py
+++ b/rpython/rlib/rvmprof/cintf.py
@@ -5,41 +5,41 @@
 from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
 from rpython.translator.tool.cbuild import ExternalCompilationInfo
 from rpython.rtyper.tool import rffi_platform as platform
+from rpython.rlib import rthread
 
 from rpython.jit.backend import detect_cpu
 
 class VMProfPlatformUnsupported(Exception):
     pass
 
+ROOT = py.path.local(rpythonroot).join('rpython', 'rlib', 'rvmprof')
+SRC = ROOT.join('src')
+
+if sys.platform.startswith('linux'):
+    _libs = ['dl']
+else:
+    _libs = []
+eci_kwds = dict(
+    include_dirs = [SRC],
+    includes = ['rvmprof.h'],
+    libraries = _libs,
+    separate_module_files = [SRC.join('rvmprof.c')],
+    post_include_bits=['#define RPYTHON_VMPROF\n'],
+    )
+global_eci = ExternalCompilationInfo(**eci_kwds)
+
+
 def setup():
     if not detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64):
         raise VMProfPlatformUnsupported("rvmprof only supports"
                                         " x86-64 CPUs for now")
 
-
-    ROOT = py.path.local(rpythonroot).join('rpython', 'rlib', 'rvmprof')
-    SRC = ROOT.join('src')
-
-
-    if sys.platform.startswith('linux'):
-        libs = ['dl']
-    else:
-        libs = []
-
-    eci_kwds = dict(
-        include_dirs = [SRC],
-        includes = ['rvmprof.h'],
-        libraries = libs,
-        separate_module_files = [SRC.join('rvmprof.c')],
-        post_include_bits=['#define RPYTHON_VMPROF\n'],
-        )
-    eci = ExternalCompilationInfo(**eci_kwds)
-
     platform.verify_eci(ExternalCompilationInfo(
         compile_extra=['-DRPYTHON_LL2CTYPES'],
         **eci_kwds))
 
 
+    eci = global_eci
     vmprof_init = rffi.llexternal("vmprof_init",
                                   [rffi.INT, rffi.DOUBLE, rffi.CCHARP],
                                   rffi.CCHARP, compilation_info=eci)
@@ -55,7 +55,8 @@
                                            rffi.INT, compilation_info=eci)
     vmprof_ignore_signals = rffi.llexternal("vmprof_ignore_signals",
                                             [rffi.INT], lltype.Void,
-                                            compilation_info=eci)
+                                            compilation_info=eci,
+                                            _nowrapper=True)
     return CInterface(locals())
 
 
@@ -67,112 +68,34 @@
     def _freeze_(self):
         return True
 
-def token2lltype(tok):
-    if tok == 'i':
-        return lltype.Signed
-    if tok == 'r':
-        return llmemory.GCREF
-    raise NotImplementedError(repr(tok))
 
-def make_trampoline_function(name, func, token, restok):
-    from rpython.jit.backend import detect_cpu
+# --- copy a few declarations from src/vmprof_stack.h ---
 
-    cont_name = 'rpyvmprof_f_%s_%s' % (name, token)
-    tramp_name = 'rpyvmprof_t_%s_%s' % (name, token)
-    orig_tramp_name = tramp_name
+VMPROF_CODE_TAG = 1
 
-    func.c_name = cont_name
-    func._dont_inline_ = True
+VMPROFSTACK = lltype.ForwardReference()
+PVMPROFSTACK = lltype.Ptr(VMPROFSTACK)
+VMPROFSTACK.become(rffi.CStruct("vmprof_stack_s",
+                                ('next', PVMPROFSTACK),
+                                ('value', lltype.Signed),
+                                ('kind', lltype.Signed)))
+# ----------
 
-    if sys.platform == 'darwin':
-        # according to internet "At the time UNIX was written in 1974...."
-        # "... all C functions are prefixed with _"
-        cont_name = '_' + cont_name
-        tramp_name = '_' + tramp_name
-        PLT = ""
-        size_decl = ""
-        type_decl = ""
-        extra_align = ""
-    else:
-        PLT = "@PLT"
-        type_decl = "\t.type\t%s, @function" % (tramp_name,)
-        size_decl = "\t.size\t%s, .-%s" % (
-            tramp_name, tramp_name)
-        extra_align = "\t.cfi_def_cfa_offset 8"
 
-    assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), (
-        "rvmprof only supports x86-64 CPUs for now")
+vmprof_tl_stack = rthread.ThreadLocalField(PVMPROFSTACK, "vmprof_tl_stack")
+do_use_eci = rffi.llexternal_use_eci(
+    ExternalCompilationInfo(includes=['vmprof_stack.h'],
+                            include_dirs = [SRC]))
 
-    # mapping of argument count (not counting the final uid argument) to
-    # the register that holds this uid argument
-    reg = {0: '%rdi',
-           1: '%rsi',
-           2: '%rdx',
-           3: '%rcx',
-           4: '%r8',
-           5: '%r9',
-           }
-    try:
-        reg = reg[len(token)]
-    except KeyError:
-        raise NotImplementedError(
-            "not supported: %r takes more than 5 arguments" % (func,))
+def enter_code(unique_id):
+    do_use_eci()
+    s = lltype.malloc(VMPROFSTACK, flavor='raw')
+    s.c_next = vmprof_tl_stack.get_or_make_raw()
+    s.c_value = unique_id
+    s.c_kind = VMPROF_CODE_TAG
+    vmprof_tl_stack.setraw(s)
+    return s
 
-    target = udir.join('module_cache')
-    target.ensure(dir=1)
-    target = target.join('trampoline_%s_%s.vmprof.s' % (name, token))
-    # NOTE! the tabs in this file are absolutely essential, things
-    #       that don't start with \t are silently ignored (<arigato>: WAT!?)
-    target.write("""\
-\t.text
-\t.globl\t%(tramp_name)s
-%(type_decl)s
-%(tramp_name)s:
-\t.cfi_startproc
-\tpushq\t%(reg)s
-\t.cfi_def_cfa_offset 16
-\tcall %(cont_name)s%(PLT)s
-\taddq\t$8, %%rsp
-%(extra_align)s
-\tret
-\t.cfi_endproc
-%(size_decl)s
-""" % locals())
-
-    def tok2cname(tok):
-        if tok == 'i':
-            return 'long'
-        if tok == 'r':
-            return 'void *'
-        raise NotImplementedError(repr(tok))
-
-    header = 'RPY_EXTERN %s %s(%s);\n' % (
-        tok2cname(restok),
-        orig_tramp_name,
-        ', '.join([tok2cname(tok) for tok in token] + ['long']))
-
-    header += """\
-static int cmp_%s(void *addr) {
-    if (addr == %s) return 1;
-#ifdef VMPROF_ADDR_OF_TRAMPOLINE
-    return VMPROF_ADDR_OF_TRAMPOLINE(addr);
-#undef VMPROF_ADDR_OF_TRAMPOLINE
-#else
-    return 0;
-#endif
-#define VMPROF_ADDR_OF_TRAMPOLINE cmp_%s
-}
-""" % (tramp_name, orig_tramp_name, tramp_name)
-
-    eci = ExternalCompilationInfo(
-        post_include_bits = [header],
-        separate_module_files = [str(target)],
-    )
-
-    return rffi.llexternal(
-        orig_tramp_name,
-        [token2lltype(tok) for tok in token] + [lltype.Signed],
-        token2lltype(restok),
-        compilation_info=eci,
-        _nowrapper=True, sandboxsafe=True,
-        random_effects_on_gcobjs=True)
+def leave_code(s):
+    vmprof_tl_stack.setraw(s.c_next)
+    lltype.free(s, flavor='raw')
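
A rough usage sketch of the new enter_code()/leave_code() pair; the real caller
is presumably the rewritten vmprof_execute_code decorator in rvmprof.py
(truncated below), but the pattern is this ('run_with_vmprof' and 'func' are
made-up names for illustration):

    from rpython.rlib.rvmprof.cintf import enter_code, leave_code

    def run_with_vmprof(unique_id, func, *args):
        s = enter_code(unique_id)      # push a VMPROF_CODE_TAG entry for this code
        try:
            return func(*args)
        finally:
            leave_code(s)              # pop it and free the raw struct again
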
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -4,12 +4,19 @@
 from rpython.rlib.rvmprof import cintf
 from rpython.rtyper.annlowlevel import cast_instance_to_gcref
 from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
-from rpython.rtyper.lltypesystem import rffi
+from rpython.rtyper.lltypesystem import rffi, llmemory
+from rpython.rtyper.lltypesystem.lloperation import llop
 
 MAX_FUNC_NAME = 1023
 
 # ____________________________________________________________
 
+# keep in sync with vmprof_stack.h
+VMPROF_CODE_TAG = 1
+VMPROF_BLACKHOLE_TAG = 2
+VMPROF_JITTED_TAG = 3
+VMPROF_JITTING_TAG = 4
+VMPROF_GC_TAG = 5
 
 class VMProfError(Exception):
     def __init__(self, msg):
@@ -19,17 +26,16 @@
 
 class VMProf(object):
 
+    _immutable_fields_ = ['is_enabled?']
+
     def __init__(self):
         "NOT_RPYTHON: use _get_vmprof()"
         self._code_classes = set()
         self._gather_all_code_objs = lambda: None
         self._cleanup_()
-        if sys.maxint == 2147483647:
-            self._code_unique_id = 0 # XXX this is wrong, it won't work on 32bit
-        else:
-            self._code_unique_id = 0x7000000000000000
+        self._code_unique_id = 4
         self.cintf = cintf.setup()
-
+        
     def _cleanup_(self):
         self.is_enabled = False
 
@@ -127,7 +133,6 @@
         if self.cintf.vmprof_register_virtual_function(name, uid, 500000) < 0:
             raise VMProfError("vmprof buffers full!  disk full or too slow")
 
-
 def vmprof_execute_code(name, get_code_fn, result_class=None):
     """Decorator to be used on the function that interprets a code object.
 
@@ -136,12 +141,7 @@
     'get_code_fn(*args)' is called to extract the code object from the
     arguments given to the decorated function.
 
-    The original function can return None, an integer, or an instance.
-    In the latter case (only), 'result_class' must be set.
-
-    NOTE: for now, this assumes that the decorated functions only takes
-    instances or plain integer arguments, and at most 5 of them
-    (including 'self' if applicable).
+    'result_class' is ignored (backward compatibility).
     """
     def decorate(func):
         try:
@@ -149,52 +149,19 @@
         except cintf.VMProfPlatformUnsupported:
             return func
 
-        if hasattr(func, 'im_self'):
-            assert func.im_self is None
-            func = func.im_func
-
-        def lower(*args):
-            if len(args) == 0:
-                return (), ""
-            ll_args, token = lower(*args[1:])
-            ll_arg = args[0]
-            if isinstance(ll_arg, int):
-                tok = "i"
-            else:
-                tok = "r"

