[pypy-commit] pypy remove-getfield-pure: Initial merge attempt

sbauman pypy.commits at gmail.com
Tue Dec 22 14:39:10 EST 2015


Author: Spenser Andrew Bauman <sabauma at gmail.com>
Branch: remove-getfield-pure
Changeset: r81424:bd02a18f4c31
Date: 2015-12-22 13:04 -0500
http://bitbucket.org/pypy/pypy/changeset/bd02a18f4c31/

Log:	Initial merge attempt

diff too long, truncating to 2000 out of 24251 lines

diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 
-all: pypy-c
+all: pypy-c cffi_imports
 
 PYPY_EXECUTABLE := $(shell which pypy)
 URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5")
@@ -10,6 +10,8 @@
 RUNINTERP = $(PYPY_EXECUTABLE)
 endif
 
+.PHONY: cffi_imports
+
 pypy-c:
 	@echo
 	@echo "===================================================================="
@@ -36,3 +38,6 @@
 # replaced with an opaque --jobserver option by the time this Makefile
 # runs.  We cannot get their original value either:
 # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html
+
+cffi_imports:
+	PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py
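
The new cffi_imports target simply runs the freshly built pypy-c over pypy/tool/build_cffi_imports.py with PYTHONPATH pointing at the checkout, so the bundled CFFI modules get precompiled after the main build. A rough Python equivalent of that one rule, for running it by hand, is sketched below; only the two paths come from the Makefile hunk above, and the helper name run_cffi_imports is illustrative.

    # Rough sketch of what `make cffi_imports` does, for manual invocation.
    # Only the interpreter and script paths come from the Makefile rule above;
    # run_cffi_imports is an illustrative name.
    import os
    import subprocess

    def run_cffi_imports(repo_root="."):
        env = dict(os.environ, PYTHONPATH=repo_root)
        subprocess.check_call(
            ["./pypy-c", "pypy/tool/build_cffi_imports.py"],
            cwd=repo_root, env=env)
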
diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py
--- a/lib-python/2.7/collections.py
+++ b/lib-python/2.7/collections.py
@@ -18,9 +18,9 @@
     assert '__pypy__' not in _sys.builtin_module_names
     newdict = lambda _ : {}
 try:
-    from __pypy__ import reversed_dict
+    from __pypy__ import reversed_dict as _reversed_dict
 except ImportError:
-    reversed_dict = lambda d: reversed(d.keys())
+    _reversed_dict = None     # don't have ordered dicts
 
 try:
     from thread import get_ident as _get_ident
@@ -46,7 +46,7 @@
     '''
 
     def __reversed__(self):
-        return reversed_dict(self)
+        return _reversed_dict(self)
 
     def popitem(self, last=True):
         '''od.popitem() -> (k, v), return and remove a (key, value) pair.
@@ -116,6 +116,178 @@
         return ItemsView(self)
 
 
+def _compat_with_unordered_dicts():
+    # This returns the methods needed in OrderedDict in case the base
+    # 'dict' class is not actually ordered, like on top of CPython or
+    # old PyPy or PyPy-STM.
+
+    # ===== Original comments and code follows      =====
+    # ===== The unmodified methods are not repeated =====
+
+    # An inherited dict maps keys to values.
+    # The inherited dict provides __getitem__, __len__, __contains__, and get.
+    # The remaining methods are order-aware.
+    # Big-O running times for all methods are the same as regular dictionaries.
+
+    # The internal self.__map dict maps keys to links in a doubly linked list.
+    # The circular doubly linked list starts and ends with a sentinel element.
+    # The sentinel element never gets deleted (this simplifies the algorithm).
+    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
+
+    def __init__(self, *args, **kwds):
+        '''Initialize an ordered dictionary.  The signature is the same as
+        regular dictionaries, but keyword arguments are not recommended because
+        their insertion order is arbitrary.
+
+        '''
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        try:
+            self.__root
+        except AttributeError:
+            self.__root = root = []                     # sentinel node
+            root[:] = [root, root, None]
+            self.__map = {}
+        self.__update(*args, **kwds)
+
+    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+        'od.__setitem__(i, y) <==> od[i]=y'
+        # Setting a new item creates a new link at the end of the linked list,
+        # and the inherited dictionary is updated with the new key/value pair.
+        if key not in self:
+            root = self.__root
+            last = root[0]
+            last[1] = root[0] = self.__map[key] = [last, root, key]
+        return dict_setitem(self, key, value)
+
+    def __delitem__(self, key, dict_delitem=dict.__delitem__):
+        'od.__delitem__(y) <==> del od[y]'
+        # Deleting an existing item uses self.__map to find the link which gets
+        # removed by updating the links in the predecessor and successor nodes.
+        dict_delitem(self, key)
+        link_prev, link_next, _ = self.__map.pop(key)
+        link_prev[1] = link_next                        # update link_prev[NEXT]
+        link_next[0] = link_prev                        # update link_next[PREV]
+
+    def __iter__(self):
+        'od.__iter__() <==> iter(od)'
+        # Traverse the linked list in order.
+        root = self.__root
+        curr = root[1]                                  # start at the first node
+        while curr is not root:
+            yield curr[2]                               # yield the curr[KEY]
+            curr = curr[1]                              # move to next node
+
+    def __reversed__(self):
+        'od.__reversed__() <==> reversed(od)'
+        # Traverse the linked list in reverse order.
+        root = self.__root
+        curr = root[0]                                  # start at the last node
+        while curr is not root:
+            yield curr[2]                               # yield the curr[KEY]
+            curr = curr[0]                              # move to previous node
+
+    def clear(self):
+        'od.clear() -> None.  Remove all items from od.'
+        root = self.__root
+        root[:] = [root, root, None]
+        self.__map.clear()
+        dict.clear(self)
+
+    # -- the following methods do not depend on the internal structure --
+
+    def keys(self):
+        'od.keys() -> list of keys in od'
+        return list(self)
+
+    def values(self):
+        'od.values() -> list of values in od'
+        return [self[key] for key in self]
+
+    def items(self):
+        'od.items() -> list of (key, value) pairs in od'
+        return [(key, self[key]) for key in self]
+
+    def iterkeys(self):
+        'od.iterkeys() -> an iterator over the keys in od'
+        return iter(self)
+
+    def itervalues(self):
+        'od.itervalues -> an iterator over the values in od'
+        for k in self:
+            yield self[k]
+
+    def iteritems(self):
+        'od.iteritems -> an iterator over the (key, value) pairs in od'
+        for k in self:
+            yield (k, self[k])
+
+    update = MutableMapping.update
+
+    __update = update # let subclasses override update without breaking __init__
+
+    __marker = object()
+
+    def pop(self, key, default=__marker):
+        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
+        value.  If key is not found, d is returned if given, otherwise KeyError
+        is raised.
+
+        '''
+        if key in self:
+            result = self[key]
+            del self[key]
+            return result
+        if default is self.__marker:
+            raise KeyError(key)
+        return default
+
+    def setdefault(self, key, default=None):
+        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+        if key in self:
+            return self[key]
+        self[key] = default
+        return default
+
+    def popitem(self, last=True):
+        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+        Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+        '''
+        if not self:
+            raise KeyError('dictionary is empty')
+        key = next(reversed(self) if last else iter(self))
+        value = self.pop(key)
+        return key, value
+
+    def __reduce__(self):
+        'Return state information for pickling'
+        items = [[k, self[k]] for k in self]
+        inst_dict = vars(self).copy()
+        for k in vars(OrderedDict()):
+            inst_dict.pop(k, None)
+        if inst_dict:
+            return (self.__class__, (items,), inst_dict)
+        return self.__class__, (items,)
+
+    @classmethod
+    def fromkeys(cls, iterable, value=None):
+        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
+        If not specified, the value defaults to None.
+
+        '''
+        self = cls()
+        for key in iterable:
+            self[key] = value
+        return self
+
+    return locals()
+
+if _reversed_dict is None:
+    for _key, _value in _compat_with_unordered_dicts().items():
+        setattr(OrderedDict, _key, _value)
+    del _key, _value
+
 ################################################################################
 ### namedtuple
 ################################################################################
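
The collections.py change above drops the old reversed(d.keys()) fallback and instead, when the base dict is not insertion-ordered (plain CPython, old PyPy, PyPy-STM), grafts a full set of order-aware methods onto OrderedDict. The mechanism is a function that defines the methods locally, returns locals(), and is then applied with setattr. Below is a minimal standalone sketch of that pattern with a toy class; Greeter and _make_methods are illustrative names, not part of the patch.

    # Minimal sketch of the "define methods in a helper, return locals(),
    # setattr them onto the class" pattern used by _compat_with_unordered_dicts().
    class Greeter(object):
        pass

    def _make_methods():
        def hello(self):
            return "hello from %s" % type(self).__name__
        def goodbye(self):
            return "goodbye"
        return locals()          # {'hello': <function>, 'goodbye': <function>}

    for _key, _value in _make_methods().items():
        setattr(Greeter, _key, _value)
    del _key, _value

    assert Greeter().hello() == "hello from Greeter"
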
diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py
--- a/lib-python/2.7/json/encoder.py
+++ b/lib-python/2.7/json/encoder.py
@@ -8,13 +8,13 @@
     def __init__(self):
         self._builder = StringBuilder()
     def append(self, string):
-        try:
-            self._builder.append(string)
-        except UnicodeEncodeError:
+        if (isinstance(string, unicode) and
+                type(self._builder) is StringBuilder):
             ub = UnicodeBuilder()
             ub.append(self._builder.build())
             self._builder = ub
-            ub.append(string)
+            self.append = ub.append   # shortcut only
+        self._builder.append(string)
     def build(self):
         return self._builder.build()
 
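
The encoder change replaces the try/except UnicodeEncodeError dance with an explicit type check: on the first unicode chunk it promotes the byte-string builder to a unicode builder and rebinds self.append so later calls skip the check entirely. StringBuilder and UnicodeBuilder come from PyPy's __pypy__ module, so the sketch below uses plain lists as stand-ins just to show the switch-once idea; the _Accumulator class is illustrative only.

    # Switch-once sketch of the encoder change, with plain lists standing in
    # for PyPy's StringBuilder/UnicodeBuilder.  _Accumulator is illustrative.
    class _Accumulator(object):
        def __init__(self):
            self._builder = []            # collects byte strings at first
            self._is_unicode = False

        def append(self, string):
            if isinstance(string, type(u"")) and not self._is_unicode:
                # Promote everything collected so far to unicode, exactly once.
                self._builder = [p.decode("ascii") if isinstance(p, bytes) else p
                                 for p in self._builder]
                self._is_unicode = True
                self.append = self._builder.append   # shortcut only
            self._builder.append(string)

        def build(self):
            return (u"" if self._is_unicode else "").join(self._builder)
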
diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py
--- a/lib-python/2.7/uuid.py
+++ b/lib-python/2.7/uuid.py
@@ -604,21 +604,8 @@
 
 def uuid4():
     """Generate a random UUID."""
-
-    # When the system provides a version-4 UUID generator, use it.
-    if _uuid_generate_random:
-        _buffer = ctypes.create_string_buffer(16)
-        _uuid_generate_random(_buffer)
-        return UUID(bytes=_buffer.raw)
-
-    # Otherwise, get randomness from urandom or the 'random' module.
-    try:
-        import os
-        return UUID(bytes=os.urandom(16), version=4)
-    except:
-        import random
-        bytes = [chr(random.randrange(256)) for i in range(16)]
-        return UUID(bytes=bytes, version=4)
+    import os
+    return UUID(bytes=os.urandom(16), version=4)
 
 def uuid5(namespace, name):
     """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: cffi
-Version: 1.3.1
+Version: 1.4.1
 Summary: Foreign Function Interface for Python calling C code.
 Home-page: http://cffi.readthedocs.org
 Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
 from .api import FFI, CDefError, FFIError
 from .ffiplatform import VerificationError, VerificationMissing
 
-__version__ = "1.3.1"
-__version_info__ = (1, 3, 1)
+__version__ = "1.4.1"
+__version_info__ = (1, 4, 1)
 
 # The verifier module file names are based on the CRC32 of a string that
 # contains the following version number.  It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -146,7 +146,9 @@
     ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
 #define _cffi_convert_array_from_object                                  \
     ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
-#define _CFFI_NUM_EXPORTS 25
+#define _cffi_call_python                                                \
+    ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25])
+#define _CFFI_NUM_EXPORTS 26
 
 typedef struct _ctypedescr CTypeDescrObject;
 
@@ -201,8 +203,11 @@
                                                   the others follow */
 }
 
+/**********  end CPython-specific section  **********/
+#else
+_CFFI_UNUSED_FN
+static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *);
 #endif
-/**********  end CPython-specific section  **********/
 
 
 #define _cffi_array_len(array)   (sizeof(array) / sizeof((array)[0]))
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -72,6 +72,8 @@
         self._cdefsources = []
         self._included_ffis = []
         self._windows_unicode = None
+        self._init_once_cache = {}
+        self._cdef_version = None
         if hasattr(backend, 'set_ffi'):
             backend.set_ffi(self)
         for name in backend.__dict__:
@@ -104,6 +106,7 @@
                 raise TypeError("cdef() argument must be a string")
             csource = csource.encode('ascii')
         with self._lock:
+            self._cdef_version = object()
             self._parser.parse(csource, override=override, packed=packed)
             self._cdefsources.append(csource)
             if override:
@@ -589,14 +592,39 @@
         recompile(self, module_name, source,
                   c_file=filename, call_c_compiler=False, **kwds)
 
-    def compile(self, tmpdir='.'):
+    def compile(self, tmpdir='.', verbose=0):
         from .recompiler import recompile
         #
         if not hasattr(self, '_assigned_source'):
             raise ValueError("set_source() must be called before compile()")
         module_name, source, source_extension, kwds = self._assigned_source
         return recompile(self, module_name, source, tmpdir=tmpdir,
-                         source_extension=source_extension, **kwds)
+                         source_extension=source_extension,
+                         compiler_verbose=verbose, **kwds)
+
+    def init_once(self, func, tag):
+        # Read _init_once_cache[tag], which is either (False, lock) if
+        # we're calling the function now in some thread, or (True, result).
+        # Don't call setdefault() in most cases, to avoid allocating and
+        # immediately freeing a lock; but still use setdefault() to avoid
+        # races.
+        try:
+            x = self._init_once_cache[tag]
+        except KeyError:
+            x = self._init_once_cache.setdefault(tag, (False, allocate_lock()))
+        # Common case: we got (True, result), so we return the result.
+        if x[0]:
+            return x[1]
+        # Else, it's a lock.  Acquire it to serialize the following tests.
+        with x[1]:
+            # Read again from _init_once_cache the current status.
+            x = self._init_once_cache[tag]
+            if x[0]:
+                return x[1]
+            # Call the function and store the result back.
+            result = func()
+            self._init_once_cache[tag] = (True, result)
+        return result
 
 
 def _load_backend_lib(backend, name, flags):
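
The new ffi.init_once(func, tag) above is a double-checked-locking cache: _init_once_cache[tag] holds (False, lock) while some thread is running func and (True, result) afterwards, so every caller gets the same result and func runs at most once. A hedged usage sketch follows; only init_once() itself comes from the patch, the initialize() function is made up, and running it requires cffi >= 1.4.

    # Hedged usage sketch for ffi.init_once().  initialize() and its return
    # value are illustrative; init_once(func, tag) is the API added above.
    import cffi

    ffi = cffi.FFI()

    def initialize():
        # imagine an expensive, call-once setup here
        return {"configured": True}

    state = ffi.init_once(initialize, "my-module-setup")
    assert state == {"configured": True}
    # A second call with the same tag returns the cached result without
    # calling initialize() again.
    assert ffi.init_once(initialize, "my-module-setup") is state
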
@@ -620,70 +648,70 @@
     import os
     backend = ffi._backend
     backendlib = _load_backend_lib(backend, libname, flags)
-    copied_enums = []
     #
-    def make_accessor_locked(name):
+    def accessor_function(name):
         key = 'function ' + name
-        if key in ffi._parser._declarations:
-            tp, _ = ffi._parser._declarations[key]
-            BType = ffi._get_cached_btype(tp)
-            try:
-                value = backendlib.load_function(BType, name)
-            except KeyError as e:
-                raise AttributeError('%s: %s' % (name, e))
-            library.__dict__[name] = value
+        tp, _ = ffi._parser._declarations[key]
+        BType = ffi._get_cached_btype(tp)
+        try:
+            value = backendlib.load_function(BType, name)
+        except KeyError as e:
+            raise AttributeError('%s: %s' % (name, e))
+        library.__dict__[name] = value
+    #
+    def accessor_variable(name):
+        key = 'variable ' + name
+        tp, _ = ffi._parser._declarations[key]
+        BType = ffi._get_cached_btype(tp)
+        read_variable = backendlib.read_variable
+        write_variable = backendlib.write_variable
+        setattr(FFILibrary, name, property(
+            lambda self: read_variable(BType, name),
+            lambda self, value: write_variable(BType, name, value)))
+    #
+    def accessor_constant(name):
+        raise NotImplementedError("non-integer constant '%s' cannot be "
+                                  "accessed from a dlopen() library" % (name,))
+    #
+    def accessor_int_constant(name):
+        library.__dict__[name] = ffi._parser._int_constants[name]
+    #
+    accessors = {}
+    accessors_version = [False]
+    #
+    def update_accessors():
+        if accessors_version[0] is ffi._cdef_version:
             return
         #
-        key = 'variable ' + name
-        if key in ffi._parser._declarations:
-            tp, _ = ffi._parser._declarations[key]
-            BType = ffi._get_cached_btype(tp)
-            read_variable = backendlib.read_variable
-            write_variable = backendlib.write_variable
-            setattr(FFILibrary, name, property(
-                lambda self: read_variable(BType, name),
-                lambda self, value: write_variable(BType, name, value)))
-            return
-        #
-        if not copied_enums:
-            from . import model
-            error = None
-            for key, (tp, _) in ffi._parser._declarations.items():
-                if not isinstance(tp, model.EnumType):
-                    continue
-                try:
-                    tp.check_not_partial()
-                except Exception as e:
-                    error = e
-                    continue
-                for enumname, enumval in zip(tp.enumerators, tp.enumvalues):
-                    if enumname not in library.__dict__:
-                        library.__dict__[enumname] = enumval
-            if error is not None:
-                if name in library.__dict__:
-                    return     # ignore error, about a different enum
-                raise error
-
-            for key, val in ffi._parser._int_constants.items():
-                if key not in library.__dict__:
-                    library.__dict__[key] = val
-
-            copied_enums.append(True)
-            if name in library.__dict__:
-                return
-        #
-        key = 'constant ' + name
-        if key in ffi._parser._declarations:
-            raise NotImplementedError("fetching a non-integer constant "
-                                      "after dlopen()")
-        #
-        raise AttributeError(name)
+        from . import model
+        for key, (tp, _) in ffi._parser._declarations.items():
+            if not isinstance(tp, model.EnumType):
+                tag, name = key.split(' ', 1)
+                if tag == 'function':
+                    accessors[name] = accessor_function
+                elif tag == 'variable':
+                    accessors[name] = accessor_variable
+                elif tag == 'constant':
+                    accessors[name] = accessor_constant
+            else:
+                for i, enumname in enumerate(tp.enumerators):
+                    def accessor_enum(name, tp=tp, i=i):
+                        tp.check_not_partial()
+                        library.__dict__[name] = tp.enumvalues[i]
+                    accessors[enumname] = accessor_enum
+        for name in ffi._parser._int_constants:
+            accessors.setdefault(name, accessor_int_constant)
+        accessors_version[0] = ffi._cdef_version
     #
     def make_accessor(name):
         with ffi._lock:
             if name in library.__dict__ or name in FFILibrary.__dict__:
                 return    # added by another thread while waiting for the lock
-            make_accessor_locked(name)
+            if name not in accessors:
+                update_accessors()
+                if name not in accessors:
+                    raise AttributeError(name)
+            accessors[name](name)
     #
     class FFILibrary(object):
         def __getattr__(self, name):
@@ -697,6 +725,10 @@
                 setattr(self, name, value)
             else:
                 property.__set__(self, value)
+        def __dir__(self):
+            with ffi._lock:
+                update_accessors()
+                return accessors.keys()
     #
     if libname is not None:
         try:
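
The rest of the api.py patch reorganizes the dlopen() library object: instead of one make_accessor_locked() that re-scans the declarations, it builds an accessors dict keyed by name (refreshed via _cdef_version whenever cdef() is called again) and adds a __dir__ so the declared names are discoverable. A hedged sketch of the visible behaviour, assuming a Unix system where ffi.dlopen(None) opens the standard C library:

    # Hedged sketch of the lazy-accessor behaviour of ffi.dlopen() libraries.
    # Assumes a Unix libc; dlopen(None) is not available on Windows.
    import cffi

    ffi = cffi.FFI()
    ffi.cdef("size_t strlen(const char *s);")
    lib = ffi.dlopen(None)             # the standard C library

    assert "strlen" in dir(lib)        # provided by the new __dir__
    assert lib.strlen(b"hello") == 5   # resolved lazily on first access
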
diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py
--- a/lib_pypy/cffi/cffi_opcode.py
+++ b/lib_pypy/cffi/cffi_opcode.py
@@ -54,6 +54,7 @@
 OP_DLOPEN_FUNC     = 35
 OP_DLOPEN_CONST    = 37
 OP_GLOBAL_VAR_F    = 39
+OP_EXTERN_PYTHON   = 41
 
 PRIM_VOID          = 0
 PRIM_BOOL          = 1
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -29,6 +29,7 @@
 _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
 _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
 _r_cdecl = re.compile(r"\b__cdecl\b")
+_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')
 _r_star_const_space = re.compile(       # matches "* const "
     r"[*]\s*((const|volatile|restrict)\b\s*)+")
 
@@ -80,6 +81,47 @@
     parts.append(csource)
     return ''.join(parts)
 
+def _preprocess_extern_python(csource):
+    # input: `extern "Python" int foo(int);` or
+    #        `extern "Python" { int foo(int); }`
+    # output:
+    #     void __cffi_extern_python_start;
+    #     int foo(int);
+    #     void __cffi_extern_python_stop;
+    parts = []
+    while True:
+        match = _r_extern_python.search(csource)
+        if not match:
+            break
+        endpos = match.end() - 1
+        #print
+        #print ''.join(parts)+csource
+        #print '=>'
+        parts.append(csource[:match.start()])
+        parts.append('void __cffi_extern_python_start; ')
+        if csource[endpos] == '{':
+            # grouping variant
+            closing = csource.find('}', endpos)
+            if closing < 0:
+                raise api.CDefError("'extern \"Python\" {': no '}' found")
+            if csource.find('{', endpos + 1, closing) >= 0:
+                raise NotImplementedError("cannot use { } inside a block "
+                                          "'extern \"Python\" { ... }'")
+            parts.append(csource[endpos+1:closing])
+            csource = csource[closing+1:]
+        else:
+            # non-grouping variant
+            semicolon = csource.find(';', endpos)
+            if semicolon < 0:
+                raise api.CDefError("'extern \"Python\": no ';' found")
+            parts.append(csource[endpos:semicolon+1])
+            csource = csource[semicolon+1:]
+        parts.append(' void __cffi_extern_python_stop;')
+        #print ''.join(parts)+csource
+        #print
+    parts.append(csource)
+    return ''.join(parts)
+
 def _preprocess(csource):
     # Remove comments.  NOTE: this only works because the cdef() section
     # should not contain any string literal!
@@ -103,8 +145,13 @@
     csource = _r_stdcall2.sub(' volatile volatile const(', csource)
     csource = _r_stdcall1.sub(' volatile volatile const ', csource)
     csource = _r_cdecl.sub(' ', csource)
+    #
+    # Replace `extern "Python"` with start/end markers
+    csource = _preprocess_extern_python(csource)
+    #
     # Replace "[...]" with "[__dotdotdotarray__]"
     csource = _r_partial_array.sub('[__dotdotdotarray__]', csource)
+    #
     # Replace "...}" with "__dotdotdotNUM__}".  This construction should
     # occur only at the end of enums; at the end of structs we have "...;}"
     # and at the end of vararg functions "...);".  Also replace "=...[,}]"
@@ -257,6 +304,7 @@
                 break
         #
         try:
+            self._inside_extern_python = False
             for decl in iterator:
                 if isinstance(decl, pycparser.c_ast.Decl):
                     self._parse_decl(decl)
@@ -326,13 +374,19 @@
                     '  #define %s %s'
                     % (key, key, key, value))
 
+    def _declare_function(self, tp, quals, decl):
+        tp = self._get_type_pointer(tp, quals)
+        if self._inside_extern_python:
+            self._declare('extern_python ' + decl.name, tp)
+        else:
+            self._declare('function ' + decl.name, tp)
+
     def _parse_decl(self, decl):
         node = decl.type
         if isinstance(node, pycparser.c_ast.FuncDecl):
             tp, quals = self._get_type_and_quals(node, name=decl.name)
             assert isinstance(tp, model.RawFunctionType)
-            tp = self._get_type_pointer(tp, quals)
-            self._declare('function ' + decl.name, tp)
+            self._declare_function(tp, quals, decl)
         else:
             if isinstance(node, pycparser.c_ast.Struct):
                 self._get_struct_union_enum_type('struct', node)
@@ -348,8 +402,7 @@
                 tp, quals = self._get_type_and_quals(node,
                                                      partial_length_ok=True)
                 if tp.is_raw_function:
-                    tp = self._get_type_pointer(tp, quals)
-                    self._declare('function ' + decl.name, tp)
+                    self._declare_function(tp, quals, decl)
                 elif (tp.is_integer_type() and
                         hasattr(decl, 'init') and
                         hasattr(decl.init, 'value') and
@@ -362,10 +415,23 @@
                         _r_int_literal.match(decl.init.expr.value)):
                     self._add_integer_constant(decl.name,
                                                '-' + decl.init.expr.value)
-                elif (quals & model.Q_CONST) and not tp.is_array_type:
-                    self._declare('constant ' + decl.name, tp, quals=quals)
+                elif (tp is model.void_type and
+                      decl.name.startswith('__cffi_extern_python_')):
+                    # hack: `extern "Python"` in the C source is replaced
+                    # with "void __cffi_extern_python_start;" and
+                    # "void __cffi_extern_python_stop;"
+                    self._inside_extern_python = not self._inside_extern_python
+                    assert self._inside_extern_python == (
+                        decl.name == '__cffi_extern_python_start')
                 else:
-                    self._declare('variable ' + decl.name, tp, quals=quals)
+                    if self._inside_extern_python:
+                        raise api.CDefError(
+                            "cannot declare constants or "
+                            "variables with 'extern \"Python\"'")
+                    if (quals & model.Q_CONST) and not tp.is_array_type:
+                        self._declare('constant ' + decl.name, tp, quals=quals)
+                    else:
+                        self._declare('variable ' + decl.name, tp, quals=quals)
 
     def parse_type(self, cdecl):
         return self.parse_type_and_quals(cdecl)[0]
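
On the parser side, _preprocess_extern_python() rewrites both forms of extern "Python" (a single declaration, or a { ... } group) into ordinary declarations bracketed by the fake variables __cffi_extern_python_start/stop; _parse_decl() then toggles _inside_extern_python when it meets those markers and files the functions under 'extern_python ...' instead of 'function ...'. The simplified standalone sketch below reproduces only the non-grouping rewrite, just to make the transformation concrete; it is not the parser's actual helper.

    # Simplified, standalone sketch of the non-grouping `extern "Python"`
    # rewrite.  The real helper also handles the { ... } grouping form and
    # reports errors through api.CDefError.
    import re

    _r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')

    def rewrite_extern_python(csource):
        parts = []
        while True:
            match = _r_extern_python.search(csource)
            if not match:
                break
            endpos = match.end() - 1
            parts.append(csource[:match.start()])
            parts.append('void __cffi_extern_python_start; ')
            semicolon = csource.index(';', endpos)
            parts.append(csource[endpos:semicolon + 1])
            parts.append(' void __cffi_extern_python_stop;')
            csource = csource[semicolon + 1:]
        parts.append(csource)
        return ''.join(parts)

    assert rewrite_extern_python('extern "Python" int add(int, int);') == (
        'void __cffi_extern_python_start; int add(int, int);'
        ' void __cffi_extern_python_stop;')
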
diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py
--- a/lib_pypy/cffi/ffiplatform.py
+++ b/lib_pypy/cffi/ffiplatform.py
@@ -17,15 +17,16 @@
 def get_extension(srcfilename, modname, sources=(), **kwds):
     from distutils.core import Extension
     allsources = [srcfilename]
-    allsources.extend(sources)
+    for src in sources:
+        allsources.append(os.path.normpath(src))
     return Extension(name=modname, sources=allsources, **kwds)
 
-def compile(tmpdir, ext):
+def compile(tmpdir, ext, compiler_verbose=0):
     """Compile a C extension module using distutils."""
 
     saved_environ = os.environ.copy()
     try:
-        outputfilename = _build(tmpdir, ext)
+        outputfilename = _build(tmpdir, ext, compiler_verbose)
         outputfilename = os.path.abspath(outputfilename)
     finally:
         # workaround for a distutils bug where some env vars can
@@ -35,10 +36,10 @@
                 os.environ[key] = value
     return outputfilename
 
-def _build(tmpdir, ext):
+def _build(tmpdir, ext, compiler_verbose=0):
     # XXX compact but horrible :-(
     from distutils.core import Distribution
-    import distutils.errors
+    import distutils.errors, distutils.log
     #
     dist = Distribution({'ext_modules': [ext]})
     dist.parse_config_files()
@@ -48,7 +49,12 @@
     options['build_temp'] = ('ffiplatform', tmpdir)
     #
     try:
-        dist.run_command('build_ext')
+        old_level = distutils.log.set_threshold(0) or 0
+        try:
+            distutils.log.set_verbosity(compiler_verbose)
+            dist.run_command('build_ext')
+        finally:
+            distutils.log.set_threshold(old_level)
     except (distutils.errors.CompileError,
             distutils.errors.LinkError) as e:
         raise VerificationError('%s: %s' % (e.__class__.__name__, e))
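
The compiler_verbose plumbing above works by saving the current distutils log threshold, setting the requested verbosity around run_command('build_ext'), and restoring the old threshold afterwards. The same save/restore can be packaged as a context manager; this is only a sketch, assuming distutils is available as in the 2.7-era code above, and distutils_verbosity is an illustrative name (the patch itself does it inline in _build()).

    # Save/restore pattern around distutils.log, packaged as a context manager.
    import contextlib
    import distutils.log

    @contextlib.contextmanager
    def distutils_verbosity(level):
        old_level = distutils.log.set_threshold(0) or 0
        try:
            distutils.log.set_verbosity(level)
            yield
        finally:
            distutils.log.set_threshold(old_level)

    # usage:  with distutils_verbosity(0): dist.run_command('build_ext')
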
diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h
--- a/lib_pypy/cffi/parse_c_type.h
+++ b/lib_pypy/cffi/parse_c_type.h
@@ -1,5 +1,6 @@
 
-/* See doc/misc/parse_c_type.rst in the source of CFFI for more information */
+/* This part is from file 'cffi/parse_c_type.h'.  It is copied at the
+   beginning of C sources generated by CFFI's ffi.set_source(). */
 
 typedef void *_cffi_opcode_t;
 
@@ -27,6 +28,7 @@
 #define _CFFI_OP_DLOPEN_FUNC    35
 #define _CFFI_OP_DLOPEN_CONST   37
 #define _CFFI_OP_GLOBAL_VAR_F   39
+#define _CFFI_OP_EXTERN_PYTHON  41
 
 #define _CFFI_PRIM_VOID          0
 #define _CFFI_PRIM_BOOL          1
@@ -160,6 +162,12 @@
     const char *error_message;
 };
 
+struct _cffi_externpy_s {
+    const char *name;
+    size_t size_of_result;
+    void *reserved1, *reserved2;
+};
+
 #ifdef _CFFI_INTERNAL
 static int parse_c_type(struct _cffi_parse_info_s *info, const char *input);
 static int search_in_globals(const struct _cffi_type_context_s *ctx,
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -118,6 +118,7 @@
 
 
 class Recompiler:
+    _num_externpy = 0
 
     def __init__(self, ffi, module_name, target_is_python=False):
         self.ffi = ffi
@@ -356,7 +357,10 @@
         else:
             prnt('  NULL,  /* no includes */')
         prnt('  %d,  /* num_types */' % (len(self.cffi_types),))
-        prnt('  0,  /* flags */')
+        flags = 0
+        if self._num_externpy:
+            flags |= 1     # set to mean that we use extern "Python"
+        prnt('  %d,  /* flags */' % flags)
         prnt('};')
         prnt()
         #
@@ -366,6 +370,11 @@
         prnt('PyMODINIT_FUNC')
         prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,))
         prnt('{')
+        if self._num_externpy:
+            prnt('    if (((intptr_t)p[0]) >= 0x0A03) {')
+            prnt('        _cffi_call_python = '
+                 '(void(*)(struct _cffi_externpy_s *, char *))p[1];')
+            prnt('    }')
         prnt('    p[0] = (const void *)%s;' % VERSION)
         prnt('    p[1] = &_cffi_type_context;')
         prnt('}')
@@ -1108,6 +1117,75 @@
             GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index)))
 
     # ----------
+    # extern "Python"
+
+    def _generate_cpy_extern_python_collecttype(self, tp, name):
+        assert isinstance(tp, model.FunctionPtrType)
+        self._do_collect_type(tp)
+
+    def _generate_cpy_extern_python_decl(self, tp, name):
+        prnt = self._prnt
+        if isinstance(tp.result, model.VoidType):
+            size_of_result = '0'
+        else:
+            context = 'result of %s' % name
+            size_of_result = '(int)sizeof(%s)' % (
+                tp.result.get_c_name('', context),)
+        prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name)
+        prnt('  { "%s", %s };' % (name, size_of_result))
+        prnt()
+        #
+        arguments = []
+        context = 'argument of %s' % name
+        for i, type in enumerate(tp.args):
+            arg = type.get_c_name(' a%d' % i, context)
+            arguments.append(arg)
+        #
+        repr_arguments = ', '.join(arguments)
+        repr_arguments = repr_arguments or 'void'
+        name_and_arguments = '%s(%s)' % (name, repr_arguments)
+        #
+        def may_need_128_bits(tp):
+            return (isinstance(tp, model.PrimitiveType) and
+                    tp.name == 'long double')
+        #
+        size_of_a = max(len(tp.args)*8, 8)
+        if may_need_128_bits(tp.result):
+            size_of_a = max(size_of_a, 16)
+        if isinstance(tp.result, model.StructOrUnion):
+            size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
+                tp.result.get_c_name(''), size_of_a,
+                tp.result.get_c_name(''), size_of_a)
+        prnt('static %s' % tp.result.get_c_name(name_and_arguments))
+        prnt('{')
+        prnt('  char a[%s];' % size_of_a)
+        prnt('  char *p = a;')
+        for i, type in enumerate(tp.args):
+            arg = 'a%d' % i
+            if (isinstance(type, model.StructOrUnion) or
+                    may_need_128_bits(type)):
+                arg = '&' + arg
+                type = model.PointerType(type)
+            prnt('  *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg))
+        prnt('  _cffi_call_python(&_cffi_externpy__%s, p);' % name)
+        if not isinstance(tp.result, model.VoidType):
+            prnt('  return *(%s)p;' % (tp.result.get_c_name('*'),))
+        prnt('}')
+        prnt()
+        self._num_externpy += 1
+
+    def _generate_cpy_extern_python_ctx(self, tp, name):
+        if self.target_is_python:
+            raise ffiplatform.VerificationError(
+                "cannot use 'extern \"Python\"' in the ABI mode")
+        if tp.ellipsis:
+            raise NotImplementedError("a vararg function is extern \"Python\"")
+        type_index = self._typesdict[tp]
+        type_op = CffiOp(OP_EXTERN_PYTHON, type_index)
+        self._lsts["global"].append(
+            GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
+
+    # ----------
     # emitting the opcodes for individual types
 
     def _emit_bytecode_VoidType(self, tp, index):
@@ -1232,7 +1310,8 @@
     return os.path.join(outputdir, *parts), parts
 
 def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True,
-              c_file=None, source_extension='.c', extradir=None, **kwds):
+              c_file=None, source_extension='.c', extradir=None,
+              compiler_verbose=1, **kwds):
     if not isinstance(module_name, str):
         module_name = module_name.encode('ascii')
     if ffi._windows_unicode:
@@ -1252,7 +1331,7 @@
             cwd = os.getcwd()
             try:
                 os.chdir(tmpdir)
-                outputfilename = ffiplatform.compile('.', ext)
+                outputfilename = ffiplatform.compile('.', ext, compiler_verbose)
             finally:
                 os.chdir(cwd)
             return outputfilename
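
In the recompiler, each extern "Python" declaration produces a static C trampoline that packs its arguments into 8-byte slots of a local buffer, calls the _cffi_call_python hook with a struct _cffi_externpy_s describing the function, and reads the result back from the same buffer; the module flags are set so the interpreter knows to fill in that hook at import time. A hedged end-to-end sketch of how the feature is used in API mode follows; the module and function names are made up, and the @ffi.def_extern() registration on the Python side comes from the cffi 1.4 documentation rather than from the hunks above, which only show the C-generation half.

    # Hedged end-to-end sketch of extern "Python" in API mode (cffi >= 1.4).
    # Module/function names are illustrative.
    import cffi

    ffibuilder = cffi.FFI()
    ffibuilder.cdef("""
        extern "Python" int my_callback(int);
        int call_it(int);
    """)
    ffibuilder.set_source("_example", """
        static int my_callback(int);   /* implemented by the generated trampoline */
        static int call_it(int x) { return my_callback(x) * 2; }
    """)

    if __name__ == "__main__":
        ffibuilder.compile(verbose=1)     # new `verbose` argument from this merge

    # In the code that imports the compiled module:
    #
    #     from _example import ffi, lib
    #
    #     @ffi.def_extern()
    #     def my_callback(x):
    #         return x + 1
    #
    #     assert lib.call_it(20) == 42
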
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -21,6 +21,8 @@
 import math as _math
 import struct as _struct
 
+_SENTINEL = object()
+
 def _cmp(x, y):
     return 0 if x == y else 1 if x > y else -1
 
@@ -31,6 +33,8 @@
 MAXYEAR = 9999
 _MINYEARFMT = 1900
 
+_MAX_DELTA_DAYS = 999999999
+
 # Utility functions, adapted from Python's Demo/classes/Dates.py, which
 # also assumes the current Gregorian calendar indefinitely extended in
 # both directions.  Difference:  Dates.py calls January 1 of year 0 day
@@ -95,6 +99,15 @@
 # pasting together 25 4-year cycles.
 assert _DI100Y == 25 * _DI4Y - 1
 
+_US_PER_US = 1
+_US_PER_MS = 1000
+_US_PER_SECOND = 1000000
+_US_PER_MINUTE = 60000000
+_SECONDS_PER_DAY = 24 * 3600
+_US_PER_HOUR = 3600000000
+_US_PER_DAY = 86400000000
+_US_PER_WEEK = 604800000000
+
 def _ord2ymd(n):
     "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
 
@@ -271,15 +284,17 @@
 
 def _check_int_field(value):
     if isinstance(value, int):
-        return value
+        return int(value)
     if not isinstance(value, float):
         try:
             value = value.__int__()
         except AttributeError:
             pass
         else:
-            if isinstance(value, (int, long)):
-                return value
+            if isinstance(value, int):
+                return int(value)
+            elif isinstance(value, long):
+                return int(long(value))
             raise TypeError('__int__ method should return an integer')
         raise TypeError('an integer is required')
     raise TypeError('integer argument expected, got float')
@@ -344,75 +359,79 @@
     raise TypeError("can't compare '%s' to '%s'" % (
                     type(x).__name__, type(y).__name__))
 
-# This is a start at a struct tm workalike.  Goals:
-#
-# + Works the same way across platforms.
-# + Handles all the fields datetime needs handled, without 1970-2038 glitches.
-#
-# Note:  I suspect it's best if this flavor of tm does *not* try to
-# second-guess timezones or DST.  Instead fold whatever adjustments you want
-# into the minutes argument (and the constructor will normalize).
+def _normalize_pair(hi, lo, factor):
+    if not 0 <= lo <= factor-1:
+        inc, lo = divmod(lo, factor)
+        hi += inc
+    return hi, lo
 
-class _tmxxx:
+def _normalize_datetime(y, m, d, hh, mm, ss, us, ignore_overflow=False):
+    # Normalize all the inputs, and store the normalized values.
+    ss, us = _normalize_pair(ss, us, 1000000)
+    mm, ss = _normalize_pair(mm, ss, 60)
+    hh, mm = _normalize_pair(hh, mm, 60)
+    d, hh = _normalize_pair(d, hh, 24)
+    y, m, d = _normalize_date(y, m, d, ignore_overflow)
+    return y, m, d, hh, mm, ss, us
 
-    ordinal = None
+def _normalize_date(year, month, day, ignore_overflow=False):
+    # That was easy.  Now it gets muddy:  the proper range for day
+    # can't be determined without knowing the correct month and year,
+    # but if day is, e.g., plus or minus a million, the current month
+    # and year values make no sense (and may also be out of bounds
+    # themselves).
+    # Saying 12 months == 1 year should be non-controversial.
+    if not 1 <= month <= 12:
+        year, month = _normalize_pair(year, month-1, 12)
+        month += 1
+        assert 1 <= month <= 12
 
-    def __init__(self, year, month, day, hour=0, minute=0, second=0,
-                 microsecond=0):
-        # Normalize all the inputs, and store the normalized values.
-        if not 0 <= microsecond <= 999999:
-            carry, microsecond = divmod(microsecond, 1000000)
-            second += carry
-        if not 0 <= second <= 59:
-            carry, second = divmod(second, 60)
-            minute += carry
-        if not 0 <= minute <= 59:
-            carry, minute = divmod(minute, 60)
-            hour += carry
-        if not 0 <= hour <= 23:
-            carry, hour = divmod(hour, 24)
-            day += carry
+    # Now only day can be out of bounds (year may also be out of bounds
+    # for a datetime object, but we don't care about that here).
+    # If day is out of bounds, what to do is arguable, but at least the
+    # method here is principled and explainable.
+    dim = _days_in_month(year, month)
+    if not 1 <= day <= dim:
+        # Move day-1 days from the first of the month.  First try to
+        # get off cheap if we're only one day out of range (adjustments
+        # for timezone alone can't be worse than that).
+        if day == 0:    # move back a day
+            month -= 1
+            if month > 0:
+                day = _days_in_month(year, month)
+            else:
+                year, month, day = year-1, 12, 31
+        elif day == dim + 1:    # move forward a day
+            month += 1
+            day = 1
+            if month > 12:
+                month = 1
+                year += 1
+        else:
+            ordinal = _ymd2ord(year, month, 1) + (day - 1)
+            year, month, day = _ord2ymd(ordinal)
 
-        # That was easy.  Now it gets muddy:  the proper range for day
-        # can't be determined without knowing the correct month and year,
-        # but if day is, e.g., plus or minus a million, the current month
-        # and year values make no sense (and may also be out of bounds
-        # themselves).
-        # Saying 12 months == 1 year should be non-controversial.
-        if not 1 <= month <= 12:
-            carry, month = divmod(month-1, 12)
-            year += carry
-            month += 1
-            assert 1 <= month <= 12
+    if not ignore_overflow and not MINYEAR <= year <= MAXYEAR:
+        raise OverflowError("date value out of range")
+    return year, month, day
 
-        # Now only day can be out of bounds (year may also be out of bounds
-        # for a datetime object, but we don't care about that here).
-        # If day is out of bounds, what to do is arguable, but at least the
-        # method here is principled and explainable.
-        dim = _days_in_month(year, month)
-        if not 1 <= day <= dim:
-            # Move day-1 days from the first of the month.  First try to
-            # get off cheap if we're only one day out of range (adjustments
-            # for timezone alone can't be worse than that).
-            if day == 0:    # move back a day
-                month -= 1
-                if month > 0:
-                    day = _days_in_month(year, month)
-                else:
-                    year, month, day = year-1, 12, 31
-            elif day == dim + 1:    # move forward a day
-                month += 1
-                day = 1
-                if month > 12:
-                    month = 1
-                    year += 1
-            else:
-                self.ordinal = _ymd2ord(year, month, 1) + (day - 1)
-                year, month, day = _ord2ymd(self.ordinal)
-
-        self.year, self.month, self.day = year, month, day
-        self.hour, self.minute, self.second = hour, minute, second
-        self.microsecond = microsecond
+def _accum(tag, sofar, num, factor, leftover):
+    if isinstance(num, (int, long)):
+        prod = num * factor
+        rsum = sofar + prod
+        return rsum, leftover
+    if isinstance(num, float):
+        fracpart, intpart = _math.modf(num)
+        prod = int(intpart) * factor
+        rsum = sofar + prod
+        if fracpart == 0.0:
+            return rsum, leftover
+        assert isinstance(factor, (int, long))
+        fracpart, intpart = _math.modf(factor * fracpart)
+        rsum += int(intpart)
+        return rsum, leftover + fracpart
+    raise TypeError("unsupported type for timedelta %s component: %s" %
+                    (tag, type(num)))
 
 class timedelta(object):
     """Represent the difference between two datetime objects.
@@ -433,100 +452,42 @@
     """
     __slots__ = '_days', '_seconds', '_microseconds', '_hashcode'
 
-    def __new__(cls, days=0, seconds=0, microseconds=0,
-                milliseconds=0, minutes=0, hours=0, weeks=0):
-        # Doing this efficiently and accurately in C is going to be difficult
-        # and error-prone, due to ubiquitous overflow possibilities, and that
-        # C double doesn't have enough bits of precision to represent
-        # microseconds over 10K years faithfully.  The code here tries to make
-        # explicit where go-fast assumptions can be relied on, in order to
-        # guide the C implementation; it's way more convoluted than speed-
-        # ignoring auto-overflow-to-long idiomatic Python could be.
+    def __new__(cls, days=_SENTINEL, seconds=_SENTINEL, microseconds=_SENTINEL,
+                milliseconds=_SENTINEL, minutes=_SENTINEL, hours=_SENTINEL, weeks=_SENTINEL):
+        x = 0
+        leftover = 0.0
+        if microseconds is not _SENTINEL:
+            x, leftover = _accum("microseconds", x, microseconds, _US_PER_US, leftover)
+        if milliseconds is not _SENTINEL:
+            x, leftover = _accum("milliseconds", x, milliseconds, _US_PER_MS, leftover)
+        if seconds is not _SENTINEL:
+            x, leftover = _accum("seconds", x, seconds, _US_PER_SECOND, leftover)
+        if minutes is not _SENTINEL:
+            x, leftover = _accum("minutes", x, minutes, _US_PER_MINUTE, leftover)
+        if hours is not _SENTINEL:
+            x, leftover = _accum("hours", x, hours, _US_PER_HOUR, leftover)
+        if days is not _SENTINEL:
+            x, leftover = _accum("days", x, days, _US_PER_DAY, leftover)
+        if weeks is not _SENTINEL:
+            x, leftover = _accum("weeks", x, weeks, _US_PER_WEEK, leftover)
+        if leftover != 0.0:
+            x += _round(leftover)
+        return cls._from_microseconds(x)
 
-        # XXX Check that all inputs are ints, longs or floats.
+    @classmethod
+    def _from_microseconds(cls, us):
+        s, us = divmod(us, _US_PER_SECOND)
+        d, s = divmod(s, _SECONDS_PER_DAY)
+        return cls._create(d, s, us, False)
 
-        # Final values, all integer.
-        # s and us fit in 32-bit signed ints; d isn't bounded.
-        d = s = us = 0
+    @classmethod
+    def _create(cls, d, s, us, normalize):
+        if normalize:
+            s, us = _normalize_pair(s, us, 1000000)
+            d, s = _normalize_pair(d, s, 24*3600)
 
-        # Normalize everything to days, seconds, microseconds.
-        days += weeks*7
-        seconds += minutes*60 + hours*3600
-        microseconds += milliseconds*1000
-
-        # Get rid of all fractions, and normalize s and us.
-        # Take a deep breath <wink>.
-        if isinstance(days, float):
-            dayfrac, days = _math.modf(days)
-            daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
-            assert daysecondswhole == int(daysecondswhole)  # can't overflow
-            s = int(daysecondswhole)
-            assert days == int(days)
-            d = int(days)
-        else:
-            daysecondsfrac = 0.0
-            d = days
-        assert isinstance(daysecondsfrac, float)
-        assert abs(daysecondsfrac) <= 1.0
-        assert isinstance(d, (int, long))
-        assert abs(s) <= 24 * 3600
-        # days isn't referenced again before redefinition
-
-        if isinstance(seconds, float):
-            secondsfrac, seconds = _math.modf(seconds)
-            assert seconds == int(seconds)
-            seconds = int(seconds)
-            secondsfrac += daysecondsfrac
-            assert abs(secondsfrac) <= 2.0
-        else:
-            secondsfrac = daysecondsfrac
-        # daysecondsfrac isn't referenced again
-        assert isinstance(secondsfrac, float)
-        assert abs(secondsfrac) <= 2.0
-
-        assert isinstance(seconds, (int, long))
-        days, seconds = divmod(seconds, 24*3600)
-        d += days
-        s += int(seconds)    # can't overflow
-        assert isinstance(s, int)
-        assert abs(s) <= 2 * 24 * 3600
-        # seconds isn't referenced again before redefinition
-
-        usdouble = secondsfrac * 1e6
-        assert abs(usdouble) < 2.1e6    # exact value not critical
-        # secondsfrac isn't referenced again
-
-        if isinstance(microseconds, float):
-            microseconds = _round(microseconds + usdouble)
-            seconds, microseconds = divmod(microseconds, 1000000)
-            days, seconds = divmod(seconds, 24*3600)
-            d += days
-            s += int(seconds)
-            microseconds = int(microseconds)
-        else:
-            microseconds = int(microseconds)
-            seconds, microseconds = divmod(microseconds, 1000000)
-            days, seconds = divmod(seconds, 24*3600)
-            d += days
-            s += int(seconds)
-            microseconds = _round(microseconds + usdouble)
-        assert isinstance(s, int)
-        assert isinstance(microseconds, int)
-        assert abs(s) <= 3 * 24 * 3600
-        assert abs(microseconds) < 3.1e6
-
-        # Just a little bit of carrying possible for microseconds and seconds.
-        seconds, us = divmod(microseconds, 1000000)
-        s += seconds
-        days, s = divmod(s, 24*3600)
-        d += days
-
-        assert isinstance(d, (int, long))
-        assert isinstance(s, int) and 0 <= s < 24*3600
-        assert isinstance(us, int) and 0 <= us < 1000000
-
-        if abs(d) > 999999999:
-            raise OverflowError("timedelta # of days is too large: %d" % d)
+        if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS:
+            raise OverflowError("days=%d; must have magnitude <= %d" % (d, _MAX_DELTA_DAYS))
 
         self = object.__new__(cls)
         self._days = d
@@ -535,6 +496,10 @@
         self._hashcode = -1
         return self
 
+    def _to_microseconds(self):
+        return ((self._days * _SECONDS_PER_DAY + self._seconds) * _US_PER_SECOND +
+                self._microseconds)
+
     def __repr__(self):
         module = "datetime." if self.__class__ is timedelta else ""
         if self._microseconds:
@@ -562,8 +527,7 @@
 
     def total_seconds(self):
         """Total seconds in the duration."""
-        return ((self.days * 86400 + self.seconds) * 10**6 +
-                self.microseconds) / 10**6
+        return self._to_microseconds() / 10**6
 
     # Read-only field accessors
     @property
@@ -585,36 +549,37 @@
         if isinstance(other, timedelta):
             # for CPython compatibility, we cannot use
             # our __class__ here, but need a real timedelta
-            return timedelta(self._days + other._days,
-                             self._seconds + other._seconds,
-                             self._microseconds + other._microseconds)
+            return timedelta._create(self._days + other._days,
+                                     self._seconds + other._seconds,
+                                     self._microseconds + other._microseconds,
+                                     True)
         return NotImplemented
 
-    __radd__ = __add__
-
     def __sub__(self, other):
         if isinstance(other, timedelta):
             # for CPython compatibility, we cannot use
             # our __class__ here, but need a real timedelta
-            return timedelta(self._days - other._days,
-                             self._seconds - other._seconds,
-                             self._microseconds - other._microseconds)
-        return NotImplemented
-
-    def __rsub__(self, other):
-        if isinstance(other, timedelta):
-            return -self + other
+            return timedelta._create(self._days - other._days,
+                                     self._seconds - other._seconds,
+                                     self._microseconds - other._microseconds,
+                                     True)
         return NotImplemented
 
     def __neg__(self):
         # for CPython compatibility, we cannot use
         # our __class__ here, but need a real timedelta
-        return timedelta(-self._days,
-                         -self._seconds,
-                         -self._microseconds)
+        return timedelta._create(-self._days,
+                                 -self._seconds,
+                                 -self._microseconds,
+                                 True)
 
     def __pos__(self):
-        return self
+        # for CPython compatibility, we cannot use
+        # our __class__ here, but need a real timedelta
+        return timedelta._create(self._days,
+                                 self._seconds,
+                                 self._microseconds,
+                                 False)
 
     def __abs__(self):
         if self._days < 0:
@@ -623,25 +588,18 @@
             return self
 
     def __mul__(self, other):
-        if isinstance(other, (int, long)):
-            # for CPython compatibility, we cannot use
-            # our __class__ here, but need a real timedelta
-            return timedelta(self._days * other,
-                             self._seconds * other,
-                             self._microseconds * other)
-        return NotImplemented
+        if not isinstance(other, (int, long)):
+            return NotImplemented
+        usec = self._to_microseconds()
+        return timedelta._from_microseconds(usec * other)
 
     __rmul__ = __mul__
 
-    def _to_microseconds(self):
-        return ((self._days * (24*3600) + self._seconds) * 1000000 +
-                self._microseconds)
-
     def __div__(self, other):
         if not isinstance(other, (int, long)):
             return NotImplemented
         usec = self._to_microseconds()
-        return timedelta(0, 0, usec // other)
+        return timedelta._from_microseconds(usec // other)
 
     __floordiv__ = __div__
 
@@ -705,9 +663,8 @@
     def __reduce__(self):
         return (self.__class__, self._getstate())
 
-timedelta.min = timedelta(-999999999)
-timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
-                          microseconds=999999)
+timedelta.min = timedelta(-_MAX_DELTA_DAYS)
+timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1)
 timedelta.resolution = timedelta(microseconds=1)
 
 class date(object):
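
After the rewrite, timedelta construction and arithmetic funnel through a single total-microseconds value (_from_microseconds()/_to_microseconds()), and timedelta.max is spelled directly in (days, seconds, microseconds) form. Both facts are easy to sanity-check against the stdlib type:

    # Sanity checks using the stdlib datetime module: the new positional
    # spelling of timedelta.max equals the old keyword form, and a timedelta
    # round-trips through its total microsecond count.
    from datetime import timedelta

    _MAX_DELTA_DAYS = 999999999
    old_style = timedelta(days=_MAX_DELTA_DAYS, hours=23, minutes=59,
                          seconds=59, microseconds=999999)
    new_style = timedelta(_MAX_DELTA_DAYS, 24*3600 - 1, 1000000 - 1)
    assert old_style == new_style == timedelta.max

    td = timedelta(days=2, hours=3, microseconds=7)
    total_us = (td.days * 86400 + td.seconds) * 10**6 + td.microseconds
    assert timedelta(microseconds=total_us) == td
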
@@ -948,32 +905,29 @@
 
     # Computations
 
-    def _checkOverflow(self, year):
-        if not MINYEAR <= year <= MAXYEAR:
-            raise OverflowError("date +/-: result year %d not in %d..%d" %
-                                (year, MINYEAR, MAXYEAR))
+    def _add_timedelta(self, other, factor):
+        y, m, d = _normalize_date(
+            self._year,
+            self._month,
+            self._day + other.days * factor)
+        return date(y, m, d)
 
     def __add__(self, other):
         "Add a date to a timedelta."
         if isinstance(other, timedelta):
-            t = _tmxxx(self._year,
-                      self._month,
-                      self._day + other.days)
-            self._checkOverflow(t.year)
-            result = date(t.year, t.month, t.day)
-            return result
+            return self._add_timedelta(other, 1)
         return NotImplemented
 
     __radd__ = __add__
 
     def __sub__(self, other):
         """Subtract two dates, or a date and a timedelta."""
-        if isinstance(other, timedelta):
-            return self + timedelta(-other.days)
         if isinstance(other, date):
             days1 = self.toordinal()
             days2 = other.toordinal()
-            return timedelta(days1 - days2)
+            return timedelta._create(days1 - days2, 0, 0, False)
+        if isinstance(other, timedelta):
+            return self._add_timedelta(other, -1)
         return NotImplemented
 
     def weekday(self):
@@ -1340,7 +1294,7 @@
         offset = self._tzinfo.utcoffset(None)
         offset = _check_utc_offset("utcoffset", offset)
         if offset is not None:
-            offset = timedelta(minutes=offset)
+            offset = timedelta._create(0, offset * 60, 0, True)
         return offset
 
     # Return an integer (or None) instead of a timedelta (or None).
@@ -1378,7 +1332,7 @@
         offset = self._tzinfo.dst(None)
         offset = _check_utc_offset("dst", offset)
         if offset is not None:
-            offset = timedelta(minutes=offset)
+            offset = timedelta._create(0, offset * 60, 0, True)
         return offset
 
     # Return an integer (or None) instead of a timedelta (or None).
@@ -1505,18 +1459,24 @@
 
         A timezone info object may be passed in as well.
         """
+        _check_tzinfo_arg(tz)
+        converter = _time.localtime if tz is None else _time.gmtime
+        self = cls._from_timestamp(converter, timestamp, tz)
+        if tz is not None:
+            self = tz.fromutc(self)
+        return self
 
-        _check_tzinfo_arg(tz)
+    @classmethod
+    def utcfromtimestamp(cls, t):
+        "Construct a UTC datetime from a POSIX timestamp (like time.time())."
+        return cls._from_timestamp(_time.gmtime, t, None)
 
-        converter = _time.localtime if tz is None else _time.gmtime
-
-        if isinstance(timestamp, int):
-            us = 0
-        else:
-            t_full = timestamp
-            timestamp = int(_math.floor(timestamp))
-            frac = t_full - timestamp
-            us = _round(frac * 1e6)
+    @classmethod
+    def _from_timestamp(cls, converter, timestamp, tzinfo):
+        t_full = timestamp
+        timestamp = int(_math.floor(timestamp))
+        frac = t_full - timestamp
+        us = _round(frac * 1e6)
 
         # If timestamp is less than one microsecond smaller than a
         # full second, us can be rounded up to 1000000.  In this case,
@@ -1527,32 +1487,7 @@
             us = 0
         y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp)
         ss = min(ss, 59)    # clamp out leap seconds if the platform has them
-        result = cls(y, m, d, hh, mm, ss, us, tz)
-        if tz is not None:
-            result = tz.fromutc(result)
-        return result
-
-    @classmethod
-    def utcfromtimestamp(cls, t):
-        "Construct a UTC datetime from a POSIX timestamp (like time.time())."
-        if isinstance(t, int):
-            us = 0
-        else:
-            t_full = t
-            t = int(_math.floor(t))
-            frac = t_full - t
-            us = _round(frac * 1e6)
-
-        # If timestamp is less than one microsecond smaller than a
-        # full second, us can be rounded up to 1000000.  In this case,
-        # roll over to seconds, otherwise, ValueError is raised
-        # by the constructor.
-        if us == 1000000:
-            t += 1
-            us = 0
-        y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
-        ss = min(ss, 59)    # clamp out leap seconds if the platform has them
-        return cls(y, m, d, hh, mm, ss, us)
+        return cls(y, m, d, hh, mm, ss, us, tzinfo)
 
     @classmethod
     def now(cls, tz=None):
@@ -1594,9 +1529,9 @@
         hh, mm, ss = self.hour, self.minute, self.second
         offset = self._utcoffset()
         if offset:  # neither None nor 0
-            tm = _tmxxx(y, m, d, hh, mm - offset)
-            y, m, d = tm.year, tm.month, tm.day
-            hh, mm = tm.hour, tm.minute
+            mm -= offset
+            y, m, d, hh, mm, ss, _ = _normalize_datetime(
+                y, m, d, hh, mm, ss, 0, ignore_overflow=True)
         return _build_struct_time(y, m, d, hh, mm, ss, 0)
 
     def date(self):
@@ -1730,7 +1665,7 @@
         offset = self._tzinfo.utcoffset(self)
         offset = _check_utc_offset("utcoffset", offset)
         if offset is not None:
-            offset = timedelta(minutes=offset)
+            offset = timedelta._create(0, offset * 60, 0, True)
         return offset
 
     # Return an integer (or None) instead of a timedelta (or None).
@@ -1768,7 +1703,7 @@
         offset = self._tzinfo.dst(self)
         offset = _check_utc_offset("dst", offset)
         if offset is not None:
-            offset = timedelta(minutes=offset)
+            offset = timedelta._create(0, offset * 60, 0, True)
         return offset
 
     # Return an integer (or None) instead of a timedelta (or None).
@@ -1859,22 +1794,22 @@
             return -1
         return diff and 1 or 0
 
+    def _add_timedelta(self, other, factor):
+        y, m, d, hh, mm, ss, us = _normalize_datetime(
+            self._year,
+            self._month,
+            self._day + other.days * factor,
+            self._hour,
+            self._minute,
+            self._second + other.seconds * factor,
+            self._microsecond + other.microseconds * factor)
+        return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo)
+
     def __add__(self, other):
         "Add a datetime and a timedelta."
         if not isinstance(other, timedelta):
             return NotImplemented
-        t = _tmxxx(self._year,
-                  self._month,
-                  self._day + other.days,
-                  self._hour,
-                  self._minute,
-                  self._second + other.seconds,
-                  self._microsecond + other.microseconds)
-        self._checkOverflow(t.year)
-        result = datetime(t.year, t.month, t.day,
-                                t.hour, t.minute, t.second,
-                                t.microsecond, tzinfo=self._tzinfo)
-        return result
+        return self._add_timedelta(other, 1)
 
     __radd__ = __add__
 
@@ -1882,16 +1817,15 @@
         "Subtract two datetimes, or a datetime and a timedelta."
         if not isinstance(other, datetime):
             if isinstance(other, timedelta):
-                return self + -other
+                return self._add_timedelta(other, -1)
             return NotImplemented
 
-        days1 = self.toordinal()
-        days2 = other.toordinal()
-        secs1 = self._second + self._minute * 60 + self._hour * 3600
-        secs2 = other._second + other._minute * 60 + other._hour * 3600
-        base = timedelta(days1 - days2,
-                         secs1 - secs2,
-                         self._microsecond - other._microsecond)
+        delta_d = self.toordinal() - other.toordinal()
+        delta_s = (self._hour - other._hour) * 3600 + \
+                  (self._minute - other._minute) * 60 + \
+                  (self._second - other._second)
+        delta_us = self._microsecond - other._microsecond
+        base = timedelta._create(delta_d, delta_s, delta_us, True)
         if self._tzinfo is other._tzinfo:
             return base
         myoff = self._utcoffset()
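
The subtraction above computes the day/second/microsecond deltas separately
and lets the timedelta constructor normalize them.  A quick check of the same
arithmetic through the public API (illustrative only):

    from datetime import datetime, timedelta

    a = datetime(2015, 12, 22, 14, 39, 10, 250000)
    b = datetime(2015, 12, 22, 13, 0, 0, 750000)
    delta_d = a.toordinal() - b.toordinal()                      # 0
    delta_s = ((a.hour - b.hour) * 3600 +
               (a.minute - b.minute) * 60 +
               (a.second - b.second))                            # 5950
    delta_us = a.microsecond - b.microsecond                     # -500000
    # timedelta() normalizes the negative microseconds, just as the
    # internal _create(..., True) call is expected to.
    assert a - b == timedelta(days=delta_d, seconds=delta_s,
                              microseconds=delta_us)
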
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -73,28 +73,36 @@
 lzma (PyPy3 only)
     liblzma
 
-sqlite3
-    libsqlite3
-
-curses
-    libncurses + cffi dependencies from above
-
 pyexpat
     libexpat1
 
 _ssl
     libssl
 
+Make sure to have these libraries (with development headers) installed
+before building PyPy, otherwise the resulting binary will not contain
+these modules.  Furthermore, the following libraries should be present
+after building PyPy, otherwise the corresponding CFFI modules are not
+built (you can run or re-run `pypy/tool/release/package.py` to retry
+building them; there is no need to re-translate the whole of PyPy):
+
+sqlite3
+    libsqlite3
+
+curses
+    libncurses
+
 gdbm
     libgdbm-dev
 
-Make sure to have these libraries (with development headers) installed before
-building PyPy, otherwise the resulting binary will not contain these modules.
+tk
+    tk-dev
 
 On Debian, this is the command to install all build-time dependencies::
 
     apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
-    libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev
+    libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
+    tk-dev
 
 For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
 
@@ -102,6 +110,7 @@
 
     yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
     lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
+    (XXX plus the Fedora version of libgdbm-dev and tk-dev)
 
 For the optional lzma module on PyPy3 you will also need ``xz-devel``.
 
@@ -110,6 +119,7 @@
     zypper install gcc make python-devel pkg-config \
     zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \
     libexpat-devel libffi-devel python-curses
+    (XXX plus the SLES11 version of libgdbm-dev and tk-dev)
 
 For the optional lzma module on PyPy3 you will also need ``xz-devel``.
 
@@ -125,11 +135,13 @@
 
 Translate with JIT::
 
-    pypy rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone.py
+    cd pypy/goal
+    pypy ../../rpython/bin/rpython --opt=jit
 
 Translate without JIT::
 
-    pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py
+    cd pypy/goal
+    pypy ../../rpython/bin/rpython --opt=2
 
 (You can use ``python`` instead of ``pypy`` here, which will take longer
 but works too.)
@@ -138,8 +150,16 @@
 current directory. The executable behaves mostly like a normal Python
 interpreter (see :doc:`cpython_differences`).
 
+Build cffi import libraries for the stdlib
+------------------------------------------
 
-.. _translate-pypy:
+Various stdlib modules require a separate build step to create the cffi
+import libraries in the `out-of-line API mode`_. This is done by the following
+command::
+
+   PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py
+
+.. _`out-of-line API mode`: http://cffi.readthedocs.org/en/latest/overview.html#real-example-api-level-out-of-line
 
 Translating with non-standard options
 -------------------------------------
@@ -199,4 +219,3 @@
 that this is never the case.
 
 
-.. TODO windows
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst
--- a/pypy/doc/embedding.rst
+++ b/pypy/doc/embedding.rst
@@ -130,8 +130,13 @@
 More complete example
 ---------------------
 
-.. note:: This example depends on pypy_execute_source_ptr which is not available
-          in PyPy <= 2.2.1.
+.. note:: These examples do not make use of ``extern "Python"``, the new
+   way to do callbacks in CFFI 1.4: they use the ABI mode, not the API
+   mode, and with the ABI mode you still have to use ``ffi.callback()``.
+   Work is in progress to integrate ``extern "Python"`` with the idea of
+   embedding; this is expected to ultimately lead to a better way to do
+   embedding than the one described here, one that works equally well on
+   CPython and PyPy.
 
 Typically we need to do more than simply execute source.  The following is a
 fully fledged example; please consult the cffi documentation for details.
diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst
--- a/pypy/doc/stm.rst
+++ b/pypy/doc/stm.rst
@@ -83,28 +83,27 @@
 
 **pypy-stm requires 64-bit Linux for now.**
 
-Development is done in the branch `stmgc-c7`_.  If you are only
-interested in trying it out, you can download a Ubuntu binary here__
-(``pypy-stm-2.*.tar.bz2``, for Ubuntu 12.04-14.04).  The current version
-supports four "segments", which means that it will run up to four
-threads in parallel.  (Development recently switched to `stmgc-c8`_,
-but that is not ready for trying out yet.)
+Development is done in the branch `stmgc-c8`_.  If you are only
+interested in trying it out, please pester us until we upload a recent
+prebuilt binary.  The current version supports four "segments", which
+means that it will run up to four threads in parallel.
 
 To build a version from sources, you first need to compile a custom
-version of clang(!); we recommend downloading `llvm and clang like
-described here`__, but at revision 201645 (use ``svn co -r 201645 <path>``
-for all checkouts).  Then apply all the patches in `this directory`__:
-they are fixes for a clang-only feature that hasn't been used so heavily
-in the past (without the patches, you get crashes of clang).  Then get
-the branch `stmgc-c7`_ of PyPy and run::
+version of gcc(!).  See the instructions here:
+https://bitbucket.org/pypy/stmgc/src/default/gcc-seg-gs/
+(Note that these patches are being incorporated into gcc.  It is likely
+that future versions of gcc will not need to be patched any more.)
 
-   rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py
+Then get the branch `stmgc-c8`_ of PyPy and run::
 
-.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/
+   cd pypy/goal
+   ../../rpython/bin/rpython -Ojit --stm
+
+At the end, this will try to compile the generated C code by calling
+``gcc-seg-gs``, which must be the script you installed by following the
+instructions above.
+
 .. _`stmgc-c8`: https://bitbucket.org/pypy/pypy/src/stmgc-c8/
-.. __: https://bitbucket.org/pypy/pypy/downloads/
-.. __: http://clang.llvm.org/get_started.html
-.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/
 
 
 .. _caveats:
@@ -112,6 +111,12 @@
 Current status (stmgc-c7)
 -------------------------
 
+.. warning::
+    
+    THIS PAGE IS OLD, THE REST IS ABOUT STMGC-C7 WHEREAS THE CURRENT
+    DEVELOPMENT WORK IS DONE ON STMGC-C8
+
+
 * **NEW:** It seems to work fine, without crashing any more.  Please `report
   any crash`_ you find (or other bugs).
 
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -15,4 +15,78 @@
 Fix the cpyext tests on OSX by linking with -flat_namespace
 
 .. branch: anntype
+
 Refactor and improve exception analysis in the annotator.
+
+.. branch: posita/2193-datetime-timedelta-integrals
+
+Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)``
+to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``).
+
+.. branch: faster-rstruct
+
+Improve the performance of struct.unpack, which now reads directly inside the
+string buffer and casts the bytes to the appropriate type, when allowed.
+Unpacking of floats and doubles is about 15 times faster now, while for
+integer types it is up to ~50% faster for 64-bit integers.
+
+.. branch: wrap-specialisation
+
+Remove unnecessary special handling of space.wrap().
+
+.. branch: compress-numbering
+
+Improve the memory signature of numbering instances in the JIT.
+
+.. branch: fix-trace-too-long-heuristic
+
+Improve the heuristic used when disabling tracing because of trace-too-long.
+
+.. branch: fix-setslice-can-resize
+
+.. branch: anntype2
+
+A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights:
+
+- Implement @doubledispatch decorator and use it for intersection() and difference().
+
+- Turn isinstance into a SpaceOperation
+
+- Create a few direct tests of the fundamental annotation invariant in test_model.py
+
+- Remove bookkeeper attribute from DictDef and ListDef.
+
+.. branch: cffi-static-callback
+
+.. branch: vecopt-absvalue
+
+- Enhancement. Removed vector fields from AbstractValue.
+
+.. branch: memop-simplify2
+
+Simplification. Backends implement too many loading instructions that differ
+only slightly in their interfaces.  Four new operations
+(gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the
+commonly known loading operations.
+
+.. branch: more-rposix
+
+Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and 
+turn them into regular RPython functions. Most RPython-compatible `os.*` 
+functions are now directly accessible as `rpython.rposix.*`.
+
+.. branch: always-enable-gil
+
+Simplify a bit the GIL handling in non-jitted code.  Fixes issue #2205.
+
+.. branch: flowspace-cleanups
+
+Trivial cleanups in flowspace.operation : fix comment & duplicated method
+
+.. branch: test-AF_NETLINK
+.. branch: small-cleanups-misc
+.. branch: cpyext-slotdefs
+.. branch: fix-missing-canraise
+
+.. branch: fix-2211
+
+Fix the cryptic exception message raised when attempting to use extended
+slicing in RPython.
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -60,6 +60,7 @@
     set PYPY_GC_MAX_DELTA=200MB
     pypy --jit loop_longevity=300 ../../rpython/bin/rpython -Ojit targetpypystandalone
     set PYPY_GC_MAX_DELTA=
+    PYTHONPATH=../.. ./pypy-c ../tool/build_cffi_imports.py
 
 .. _build instructions: http://pypy.org/download.html#building-from-source
 
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -81,9 +81,8 @@
     # register the minimal equivalent of running a small piece of code. This
     # should be used as sparsely as possible, just to register callbacks
 
-    from rpython.rlib.entrypoint import entrypoint, RPython_StartupCode
+    from rpython.rlib.entrypoint import entrypoint_highlevel
     from rpython.rtyper.lltypesystem import rffi, lltype
-    from rpython.rtyper.lltypesystem.lloperation import llop
 
     w_pathsetter = space.appexec([], """():
     def f(path):
@@ -92,7 +91,8 @@
     return f
     """)
 
-    @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home')
+    @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT],
+                          c_name='pypy_setup_home')
     def pypy_setup_home(ll_home, verbose):
         from pypy.module.sys.initpath import pypy_find_stdlib
         verbose = rffi.cast(lltype.Signed, verbose)
@@ -126,30 +126,24 @@
                 debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
             return rffi.cast(rffi.INT, -1)
 
-    @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source')
+    @entrypoint_highlevel('main', [rffi.CCHARP], c_name='pypy_execute_source')
     def pypy_execute_source(ll_source):
         return pypy_execute_source_ptr(ll_source, 0)
 
-    @entrypoint('main', [rffi.CCHARP, lltype.Signed],
-                c_name='pypy_execute_source_ptr')
+    @entrypoint_highlevel('main', [rffi.CCHARP, lltype.Signed],
+                          c_name='pypy_execute_source_ptr')
     def pypy_execute_source_ptr(ll_source, ll_ptr):
-        after = rffi.aroundstate.after
-        if after: after()
         source = rffi.charp2str(ll_source)
         res = _pypy_execute_source(source, ll_ptr)
-        before = rffi.aroundstate.before
-        if before: before()
         return rffi.cast(rffi.INT, res)
 
-    @entrypoint('main', [], c_name='pypy_init_threads')
+    @entrypoint_highlevel('main', [], c_name='pypy_init_threads')
     def pypy_init_threads():
         if not space.config.objspace.usemodules.thread:
             return
         os_thread.setup_threads(space)
-        before = rffi.aroundstate.before
-        if before: before()
 
-    @entrypoint('main', [], c_name='pypy_thread_attach')
+    @entrypoint_highlevel('main', [], c_name='pypy_thread_attach')
     def pypy_thread_attach():
         if not space.config.objspace.usemodules.thread:
             return
@@ -158,8 +152,6 @@
         rthread.gc_thread_start()
         os_thread.bootstrapper.nbthreads += 1
         os_thread.bootstrapper.release()
-        before = rffi.aroundstate.before
-        if before: before()
 
     def _pypy_execute_source(source, c_argument):
         try:
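
The entry points above are meant to be called from an embedding host.  A
hypothetical ABI-mode sketch using cffi from the host side, assuming the
documented 0-on-success convention (the library path and home directory
below are placeholders, not part of this changeset):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("""
        int  pypy_setup_home(char *home, int verbose);
        int  pypy_execute_source(char *source);
        void pypy_init_threads(void);
    """)
    lib = ffi.dlopen("./libpypy-c.so")                    # placeholder path
    if lib.pypy_setup_home(b"/opt/pypy/bin/pypy-c", 1):   # placeholder home
        raise RuntimeError("pypy_setup_home failed")
    lib.pypy_init_threads()
    lib.pypy_execute_source(b"print 'hello from embedded PyPy'")
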
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -289,6 +289,8 @@
             for w_item in space.fixedview(obj):
                 result_w.append(self._make_key(w_item))
             w_key = space.newtuple(result_w[:])
+        elif isinstance(obj, PyCode):
+            w_key = space.newtuple([obj, w_type, space.id(obj)])
         else:
             w_key = space.newtuple([obj, w_type])
         return w_key
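
The ``_make_key`` change above keeps equal-comparing code objects from being
folded into a single constant.  The symptom it avoids can be shown with plain
Python (illustrative only; it mirrors the new test added below):

    # Two lambdas whose code objects compare equal must not end up sharing
    # one constant, otherwise both would return the same object.
    f = lambda: 1
    g = lambda: 1.0
    assert type(f()) is int
    assert type(g()) is float   # would be int if the constants were merged
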
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -931,6 +931,11 @@
         finally:
             space.call_function(w_set_debug, space.w_True)
 
+    def test_dont_fold_equal_code_objects(self):
+        yield self.st, "f=lambda:1;g=lambda:1.0;x=g()", 'type(x)', float
+        yield (self.st, "x=(lambda: (-0.0, 0.0), lambda: (0.0, -0.0))[1]()",
+                        'repr(x)', '(0.0, -0.0)')
+
 
 class AppTestCompiler:
 
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -2,7 +2,7 @@
 from pypy.interpreter.mixedmodule import MixedModule
 from rpython.rlib import rdynload, clibffi
 
-VERSION = "1.3.1"
+VERSION = "1.4.2"
 
 FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
 try:
diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_cffi_backend/call_python.py
@@ -0,0 +1,133 @@
+import os
+from rpython.rlib.objectmodel import specialize, instantiate
+from rpython.rlib.rarithmetic import intmask
+from rpython.rlib import jit
+from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rtyper.lltypesystem.lloperation import llop
+from rpython.rtyper.annlowlevel import llhelper
+
+from pypy.interpreter.error import oefmt
+from pypy.interpreter.gateway import interp2app
+from pypy.module._cffi_backend import parse_c_type
+from pypy.module._cffi_backend import cerrno
+from pypy.module._cffi_backend import cffi_opcode
+from pypy.module._cffi_backend import realize_c_type
+from pypy.module._cffi_backend.realize_c_type import getop, getarg
+
+
+STDERR = 2
+EXTERNPY_FN = lltype.FuncType([parse_c_type.PEXTERNPY, rffi.CCHARP],
+                              lltype.Void)
+
+
+def _cffi_call_python(ll_externpy, ll_args):
+    """Invoked by the helpers generated from extern "Python" in the cdef.
+
+       'externpy' is a static structure that describes which of the
+       extern "Python" functions is called.  It has fields 'name' and
+       'type_index' describing the function, and more reserved fields
+       that are initially zero.  These reserved fields are set up by
+       ffi.def_extern(), which invokes externpy_deco() below.
+
+       'args' is a pointer to an array of 8-byte entries.  Each entry
+       contains an argument.  If an argument is less than 8 bytes, only
+       the part at the beginning of the entry is initialized.  If an
+       argument is 'long double' or a struct/union, then it is passed
+       by reference.
+
+       'args' is also used as the place to write the result to
+       (directly, even if more than 8 bytes).  In all cases, 'args' is
+       at least 8 bytes in size.
+    """
+    from pypy.module._cffi_backend.ccallback import reveal_callback
+    from rpython.rlib import rgil
+

