[pypy-commit] pypy py3.6: merge py3.5 into branch

mattip pypy.commits at gmail.com
Mon Jan 28 14:00:54 EST 2019


Author: Matti Picus <matti.picus at gmail.com>
Branch: py3.6
Changeset: r95739:614f05464dbb
Date: 2019-01-28 16:22 +0200
http://bitbucket.org/pypy/pypy/changeset/614f05464dbb/

Log:	merge py3.5 into branch

diff too long, truncating to 2000 out of 2143 lines

diff --git a/extra_tests/cffi_tests/cffi0/test_ffi_backend.py b/extra_tests/cffi_tests/cffi0/test_ffi_backend.py
--- a/extra_tests/cffi_tests/cffi0/test_ffi_backend.py
+++ b/extra_tests/cffi_tests/cffi0/test_ffi_backend.py
@@ -325,19 +325,32 @@
         a = array.array('H', [10000, 20000, 30000])
         c = ffi.from_buffer(a)
         assert ffi.typeof(c) is ffi.typeof("char[]")
+        assert len(c) == 6
         ffi.cast("unsigned short *", c)[1] += 500
         assert list(a) == [10000, 20500, 30000]
-        assert c == ffi.from_buffer(a, True)
+        assert c == ffi.from_buffer("char[]", a, True)
         assert c == ffi.from_buffer(a, require_writable=True)
         #
+        c = ffi.from_buffer("unsigned short[]", a)
+        assert len(c) == 3
+        assert c[1] == 20500
+        #
         p = ffi.from_buffer(b"abcd")
         assert p[2] == b"c"
         #
-        assert p == ffi.from_buffer(b"abcd", False)
-        py.test.raises((TypeError, BufferError), ffi.from_buffer, b"abcd", True)
+        assert p == ffi.from_buffer(b"abcd", require_writable=False)
+        py.test.raises((TypeError, BufferError), ffi.from_buffer,
+                                                 "char[]", b"abcd", True)
         py.test.raises((TypeError, BufferError), ffi.from_buffer, b"abcd",
                                                  require_writable=True)
 
+    def test_release(self):
+        ffi = FFI()
+        p = ffi.new("int[]", 123)
+        ffi.release(p)
+        # here, reading p[0] might give garbage or segfault...
+        ffi.release(p)   # no effect
+
     def test_memmove(self):
         ffi = FFI()
         p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678])
diff --git a/extra_tests/cffi_tests/cffi1/test_ffi_obj.py b/extra_tests/cffi_tests/cffi1/test_ffi_obj.py
--- a/extra_tests/cffi_tests/cffi1/test_ffi_obj.py
+++ b/extra_tests/cffi_tests/cffi1/test_ffi_obj.py
@@ -239,19 +239,31 @@
 def test_ffi_from_buffer():
     import array
     ffi = _cffi1_backend.FFI()
-    a = array.array('H', [10000, 20000, 30000])
+    a = array.array('H', [10000, 20000, 30000, 40000])
     c = ffi.from_buffer(a)
     assert ffi.typeof(c) is ffi.typeof("char[]")
+    assert len(c) == 8
     ffi.cast("unsigned short *", c)[1] += 500
-    assert list(a) == [10000, 20500, 30000]
-    assert c == ffi.from_buffer(a, True)
+    assert list(a) == [10000, 20500, 30000, 40000]
+    py.test.raises(TypeError, ffi.from_buffer, a, True)
+    assert c == ffi.from_buffer("char[]", a, True)
     assert c == ffi.from_buffer(a, require_writable=True)
     #
+    c = ffi.from_buffer("unsigned short[]", a)
+    assert len(c) == 4
+    assert c[1] == 20500
+    #
+    c = ffi.from_buffer("unsigned short[2][2]", a)
+    assert len(c) == 2
+    assert len(c[0]) == 2
+    assert c[0][1] == 20500
+    #
     p = ffi.from_buffer(b"abcd")
     assert p[2] == b"c"
     #
-    assert p == ffi.from_buffer(b"abcd", False)
-    py.test.raises((TypeError, BufferError), ffi.from_buffer, b"abcd", True)
+    assert p == ffi.from_buffer(b"abcd", require_writable=False)
+    py.test.raises((TypeError, BufferError), ffi.from_buffer,
+                                             "char[]", b"abcd", True)
     py.test.raises((TypeError, BufferError), ffi.from_buffer, b"abcd",
                                              require_writable=True)
 
diff --git a/extra_tests/cffi_tests/cffi1/test_new_ffi_1.py b/extra_tests/cffi_tests/cffi1/test_new_ffi_1.py
--- a/extra_tests/cffi_tests/cffi1/test_new_ffi_1.py
+++ b/extra_tests/cffi_tests/cffi1/test_new_ffi_1.py
@@ -1457,6 +1457,35 @@
         import gc; gc.collect(); gc.collect(); gc.collect()
         assert seen == [3]
 
+    def test_release(self):
+        p = ffi.new("int[]", 123)
+        ffi.release(p)
+        # here, reading p[0] might give garbage or segfault...
+        ffi.release(p)   # no effect
+
+    def test_release_new_allocator(self):
+        seen = []
+        def myalloc(size):
+            seen.append(size)
+            return ffi.new("char[]", b"X" * size)
+        def myfree(raw):
+            seen.append(raw)
+        alloc2 = ffi.new_allocator(alloc=myalloc, free=myfree)
+        p = alloc2("int[]", 15)
+        assert seen == [15 * 4]
+        ffi.release(p)
+        assert seen == [15 * 4, p]
+        ffi.release(p)    # no effect
+        assert seen == [15 * 4, p]
+        #
+        del seen[:]
+        p = alloc2("struct ab *")
+        assert seen == [2 * 4]
+        ffi.release(p)
+        assert seen == [2 * 4, p]
+        ffi.release(p)    # no effect
+        assert seen == [2 * 4, p]
+
     def test_CData_CType(self):
         assert isinstance(ffi.cast("int", 0), ffi.CData)
         assert isinstance(ffi.new("int *"), ffi.CData)
@@ -1647,24 +1676,6 @@
         py.test.raises(TypeError, len, q.a)
         py.test.raises(TypeError, list, q.a)
 
-    def test_from_buffer(self):
-        import array
-        a = array.array('H', [10000, 20000, 30000])
-        c = ffi.from_buffer(a)
-        assert ffi.typeof(c) is ffi.typeof("char[]")
-        ffi.cast("unsigned short *", c)[1] += 500
-        assert list(a) == [10000, 20500, 30000]
-        assert c == ffi.from_buffer(a, True)
-        assert c == ffi.from_buffer(a, require_writable=True)
-        #
-        p = ffi.from_buffer(b"abcd")
-        assert p[2] == b"c"
-        #
-        assert p == ffi.from_buffer(b"abcd", False)
-        py.test.raises((TypeError, BufferError), ffi.from_buffer, b"abcd", True)
-        py.test.raises((TypeError, BufferError), ffi.from_buffer, b"abcd",
-                                                 require_writable=True)
-
     def test_all_primitives(self):
         assert set(PRIMITIVE_TO_INDEX) == set([
             "char",
diff --git a/extra_tests/test_bufferedreader.py b/extra_tests/test_bufferedreader.py
--- a/extra_tests/test_bufferedreader.py
+++ b/extra_tests/test_bufferedreader.py
@@ -88,7 +88,7 @@
         assert self.stream.readline(80) == expected
 
 @pytest.mark.parametrize('StreamCls', [Stream, StreamCFFI])
-@settings(max_examples=50)
+@settings(max_examples=50, deadline=None)
 @given(params=data_and_sizes(), chunk_size=st.integers(MIN_READ_SIZE, 8192))
 def test_stateful(params, chunk_size, StreamCls):
     data, sizes = params
diff --git a/extra_tests/test_datetime.py b/extra_tests/test_datetime.py
--- a/extra_tests/test_datetime.py
+++ b/extra_tests/test_datetime.py
@@ -33,7 +33,9 @@
     (timedelta_safe(1, 2, 3), "timedelta_safe(1, 2, 3)"),
 ])
 def test_repr(obj, expected):
-    assert repr(obj) == expected
+    # XXX: there's a discrepancy between datetime.py and CPython's _datetime
+    # for the repr() of Python-defined subclasses of datetime classes.
+    assert repr(obj).endswith(expected)
 
 @pytest.mark.parametrize("obj", [
     datetime.date.today(),
diff --git a/lib-python/3/distutils/sysconfig_pypy.py b/lib-python/3/distutils/sysconfig_pypy.py
--- a/lib-python/3/distutils/sysconfig_pypy.py
+++ b/lib-python/3/distutils/sysconfig_pypy.py
@@ -10,7 +10,7 @@
 
 import sys
 import os
-import imp, _imp
+import _imp
 
 from distutils.errors import DistutilsPlatformError
 
diff --git a/lib-python/3/test/test_dictviews.py b/lib-python/3/test/test_dictviews.py
--- a/lib-python/3/test/test_dictviews.py
+++ b/lib-python/3/test/test_dictviews.py
@@ -1,3 +1,4 @@
+from test import support
 import collections
 import copy
 import pickle
@@ -5,6 +6,7 @@
 
 class DictSetTest(unittest.TestCase):
 
+    @support.cpython_only
     def test_constructors_not_callable(self):
         kt = type({}.keys())
         self.assertRaises(TypeError, kt, {})
diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py
--- a/lib_pypy/_ctypes/primitive.py
+++ b/lib_pypy/_ctypes/primitive.py
@@ -410,6 +410,6 @@
                                             id(self))
 
     def __bool__(self):
-        return self._buffer[0] not in (0, '\x00')
+        return self._buffer[0] not in (0, b'\x00')
 
 from _ctypes.function import CFuncPtr
diff --git a/lib_pypy/_gdbm.py b/lib_pypy/_gdbm.py
--- a/lib_pypy/_gdbm.py
+++ b/lib_pypy/_gdbm.py
@@ -74,12 +74,11 @@
             self.__check_closed()
             key = _checkstr(key)
             return lib.pygdbm_exists(self.__ll_dbm, key, len(key))
-    has_key = __contains__
 
     def get(self, key, default=None):
         with _lock:
             self.__check_closed()
-            key = _checkstr(key)        
+            key = _checkstr(key)
             drec = lib.pygdbm_fetch(self.__ll_dbm, key, len(key))
             if not drec.dptr:
                 return default
diff --git a/lib_pypy/_pypy_collections.py b/lib_pypy/_pypy_collections.py
--- a/lib_pypy/_pypy_collections.py
+++ b/lib_pypy/_pypy_collections.py
@@ -1,6 +1,5 @@
 from __pypy__ import reversed_dict, move_to_end, objects_in_repr
 from _operator import eq as _eq
-import _collections_abc
 
 
 class OrderedDict(dict):
@@ -29,7 +28,33 @@
             raise TypeError('expected at most 1 arguments, got %d' % len(args))
         self.__update(*args, **kwds)
 
-    update = __update = _collections_abc.MutableMapping.update
+    def update(*args, **kwds):
+        ''' D.update([E, ]**F) -> None.  Update D from mapping/iterable E and F.
+            If E present and has a .keys() method, does:     for k in E: D[k] = E[k]
+            If E present and lacks .keys() method, does:     for (k, v) in E: D[k] = v
+            In either case, this is followed by: for k, v in F.items(): D[k] = v
+        '''
+        if not args:
+            raise TypeError("descriptor 'update' of 'OrderedDict' object "
+                            "needs an argument")
+        self, *args = args
+        if len(args) > 1:
+            raise TypeError('update expected at most 1 arguments, got %d' %
+                            len(args))
+        if args:
+            other = args[0]
+            if hasattr(other, 'items'):
+                for key, value in other.items():
+                    self[key] = value
+            elif hasattr(other, "keys"):
+                for key in other.keys():
+                    self[key] = other[key]
+            else:
+                for key, value in other:
+                    self[key] = value
+        for key, value in kwds.items():
+            self[key] = value
+    __update = update
 
     def __reversed__(self):
         return reversed_dict(self)
@@ -106,17 +131,20 @@
         "D.values() -> an object providing a view on D's values"
         return _OrderedDictValuesView(self)
 
+dict_keys = type({}.keys())
+dict_values = type({}.values())
+dict_items = type({}.items())
 
-class _OrderedDictKeysView(_collections_abc.KeysView):
+class _OrderedDictKeysView(dict_keys):
     def __reversed__(self):
-        yield from reversed_dict(self._mapping)
+        yield from reversed_dict(self._dict)
 
-class _OrderedDictItemsView(_collections_abc.ItemsView):
+class _OrderedDictItemsView(dict_items):
     def __reversed__(self):
-        for key in reversed_dict(self._mapping):
-            yield (key, self._mapping[key])
+        for key in reversed_dict(self._dict):
+            yield (key, self._dict[key])
 
-class _OrderedDictValuesView(_collections_abc.ValuesView):
+class _OrderedDictValuesView(dict_values):
     def __reversed__(self):
-        for key in reversed_dict(self._mapping):
-            yield self._mapping[key]
+        for key in reversed_dict(self._dict):
+            yield self._dict[key]
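
The reimplemented OrderedDict.update() above follows the plain-dict semantics
spelled out in its docstring.  As a quick illustration, here is a minimal
sketch that exercises those code paths, using the stdlib
collections.OrderedDict as a stand-in rather than the PyPy _pypy_collections
module itself:

    # Illustrative only: exercises the update() paths described in the
    # docstring above, using the stdlib OrderedDict as a stand-in.
    from collections import OrderedDict

    d = OrderedDict()
    d.update({'a': 1})               # has .items(): for k, v in E.items(): D[k] = v
    d.update([('b', 2), ('c', 3)])   # iterable of pairs: for (k, v) in E: D[k] = v
    d.update(c=30, e=5)              # keyword args: for k, v in F.items(): D[k] = v
    assert list(d.items()) == [('a', 1), ('b', 2), ('c', 30), ('e', 5)]
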
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -16,6 +16,8 @@
     # Python 3.x
     basestring = str
 
+_unspecified = object()
+
 
 
 class FFI(object):
@@ -341,15 +343,22 @@
    #    """
    #    note that 'buffer' is a type, set on this instance by __init__
 
-    def from_buffer(self, python_buffer, require_writable=False):
-        """Return a <cdata 'char[]'> that points to the data of the
+    def from_buffer(self, cdecl, python_buffer=_unspecified,
+                    require_writable=False):
+        """Return a cdata of the given type pointing to the data of the
         given Python object, which must support the buffer interface.
         Note that this is not meant to be used on the built-in types
         str or unicode (you can build 'char[]' arrays explicitly)
         but only on objects containing large quantities of raw data
         in some other format, like 'array.array' or numpy arrays.
+
+        The first argument is optional and defaults to 'char[]'.
         """
-        return self._backend.from_buffer(self.BCharA, python_buffer,
+        if python_buffer is _unspecified:
+            cdecl, python_buffer = self.BCharA, cdecl
+        elif isinstance(cdecl, basestring):
+            cdecl = self._typeof(cdecl)
+        return self._backend.from_buffer(cdecl, python_buffer,
                                          require_writable)
 
     def memmove(self, dest, src, n):
@@ -530,6 +539,9 @@
     def from_handle(self, x):
         return self._backend.from_handle(x)
 
+    def release(self, x):
+        self._backend.release(x)
+
     def set_unicode(self, enabled_flag):
         """Windows: if 'enabled_flag' is True, enable the UNICODE and
         _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR
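
For context, the new optional first argument to ffi.from_buffer() and the new
ffi.release() method added above can be exercised roughly like this.  This is
a hedged sketch that assumes a cffi build which already contains these
changes; it mirrors the tests earlier in this diff:

    # Usage sketch of the API added above; mirrors the tests in this diff.
    import array
    from cffi import FFI

    ffi = FFI()
    a = array.array('H', [10000, 20000, 30000, 40000])

    c = ffi.from_buffer(a)                      # old one-argument form: 'char[]'
    d = ffi.from_buffer("unsigned short[]", a)  # new form with an explicit ctype
    assert len(c) == 8 and len(d) == 4
    assert d[1] == 20000

    p = ffi.new("int[]", 10)
    ffi.release(p)    # frees the memory now instead of waiting for the GC
    ffi.release(p)    # a second call has no effect
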
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -16,6 +16,13 @@
 except ImportError:
     lock = None
 
+def _workaround_for_static_import_finders():
+    # Issue #392: packaging tools like cx_Freeze can not find these
+    # because pycparser uses exec dynamic import.  This is an obscure
+    # workaround.  This function is never called.
+    import pycparser.yacctab
+    import pycparser.lextab
+
 CDEF_SOURCE_STRING = "<cdef source string>"
 _r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$",
                         re.DOTALL | re.MULTILINE)
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -14,6 +14,18 @@
 rsyncdirs = ['.', '../lib-python', '../lib_pypy', '../demo']
 rsyncignore = ['_cache']
 
+try:
+    from hypothesis import settings, __version__
+except ImportError:
+    pass
+else:
+    if __version__[:2] < '3.6':
+        s = settings(deadline=None)
+        settings.register_profile('default', s)
+    else:
+        settings.register_profile('default', deadline=None)
+    settings.load_profile('default')
+
 # PyPy's command line extra options (these are added
 # to py.test's standard options)
 #
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -65,10 +65,15 @@
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
+
+# Make sure to keep this in sync with:
+#    module/sys/version.py
+#    module/cpyext/include/patchlevel.h
+#
 # The short X.Y version.
-version = '6.0'
+version = '7.1'
 # The full version, including alpha/beta/rc tags.
-release = '6.0.0'
+release = '7.1.0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -16,9 +16,6 @@
 How to Create a PyPy Release
 ++++++++++++++++++++++++++++
 
-Overview
---------
-
 As a meta rule setting up issues in the tracker for items here may help not
 forgetting things. A set of todo files may also work.
 
@@ -28,17 +25,54 @@
 
 
 Release Steps
--------------
+++++++++++++++
 
-* If needed, make a release branch
-* Bump the
-  pypy version number in module/sys/version.py and in
-  module/cpyext/include/patchlevel.h and in doc/conf.py. The branch
-  will capture the revision number of this change for the release.
+Make the release branch
+------------------------
 
-  Some of the next updates may be done before or after branching; make
-  sure things are ported back to the trunk and to the branch as
-  necessary.
+This is needed only in case you are doing a new major version; if not, you can
+probably reuse the existing release branch.
+
+We want to be able to freely merge default into the branch and vice-versa;
+thus we need to do a complicated dance to avoid patching the version number
+when we do a merge::
+
+  $ hg up -r default
+  $ # edit the version to e.g. 7.0.0-final
+  $ hg ci
+  $ hg branch release-pypy2.7-7.x && hg ci
+  $ hg up -r default
+  $ # edit the version to 7.1.0-alpha0
+  $ hg ci
+  $ hg up -r release-pypy2.7-7.x
+  $ hg merge default
+  $ # edit the version to AGAIN 7.0.0-final
+  $ hg ci
+
+Then, we need to do the same for the 3.x branch::
+
+  $ hg up -r py3.5
+  $ hg merge default # this brings the version to 7.1.0-alpha0
+  $ hg branch release-pypy3.5-7.x
+  $ # edit the version to 7.0.0-final
+  $ hg ci
+  $ hg up -r py3.5
+  $ hg merge release-pypy3.5-7.x
+  $ # edit the version to 7.1.0-alpha0
+  $ hg ci
+
+To change the version, you need to edit three files:
+
+  - ``module/sys/version.py``
+
+  - ``module/cpyext/include/patchlevel.h``
+
+  - ``doc/conf.py``
+
+
+Other steps
+-----------
+
 
 * Make sure the RPython builds on the buildbot pass with no failures
 
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -3,7 +3,6 @@
 # See test/test_app_main.
 
 # Missing vs CPython: -b, -d, -x
-from __future__ import print_function, unicode_literals
 USAGE1 = __doc__ = """\
 Options and arguments (and corresponding environment variables):
 -B     : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x
@@ -334,7 +333,7 @@
             del encerr
 
 def create_stdio(fd, writing, name, encoding, errors, unbuffered):
-    import io
+    import _io
     # stdin is always opened in buffered mode, first because it
     # shouldn't make a difference in common use cases, second because
     # TextIOWrapper depends on the presence of a read1() method which
@@ -342,7 +341,7 @@
     buffering = 0 if unbuffered and writing else -1
     mode = 'w' if writing else 'r'
     try:
-        buf = io.open(fd, mode + 'b', buffering, closefd=False)
+        buf = _io.open(fd, mode + 'b', buffering, closefd=False)
     except OSError as e:
         if e.errno != errno.EBADF:
             raise
@@ -352,7 +351,7 @@
     raw.name = name
     # translate \r\n to \n for sys.stdin on Windows
     newline = None if sys.platform == 'win32' and not writing else '\n'
-    stream = io.TextIOWrapper(buf, encoding, errors, newline=newline,
+    stream = _io.TextIOWrapper(buf, encoding, errors, newline=newline,
                               line_buffering=unbuffered or raw.isatty())
     stream.mode = mode
     return stream
@@ -549,12 +548,6 @@
 
     return options
 
-# this indirection is needed to be able to import this module on python2, else
-# we have a SyntaxError: unqualified exec in a nested function
-@hidden_applevel
-def exec_(src, dic):
-    exec(src, dic)
-
 @hidden_applevel
 def run_command_line(interactive,
                      inspect,
@@ -664,7 +657,7 @@
             else:
                 if not isolated:
                     sys.path.insert(0, '')
-                success = run_toplevel(exec_, bytes, mainmodule.__dict__)
+                success = run_toplevel(exec, bytes, mainmodule.__dict__)
         elif run_module != 0:
             # handle the "-m" command
             # '' on sys.path is required also here
@@ -704,7 +697,7 @@
                                                         python_startup,
                                                         'exec',
                                                         PyCF_ACCEPT_NULL_BYTES)
-                            exec_(co_python_startup, mainmodule.__dict__)
+                            exec(co_python_startup, mainmodule.__dict__)
                         mainmodule.__file__ = python_startup
                         mainmodule.__cached__ = None
                         run_toplevel(run_it)
@@ -722,7 +715,7 @@
                 def run_it():
                     co_stdin = compile(sys.stdin.read(), '<stdin>', 'exec',
                                        PyCF_ACCEPT_NULL_BYTES)
-                    exec_(co_stdin, mainmodule.__dict__)
+                    exec(co_stdin, mainmodule.__dict__)
                 mainmodule.__file__ = '<stdin>'
                 mainmodule.__cached__ = None
                 success = run_toplevel(run_it)
@@ -764,7 +757,7 @@
                         co = marshal.load(f)
                     if type(co) is not type((lambda:0).__code__):
                         raise RuntimeError("Bad code object in .pyc file")
-                    exec_(co, namespace)
+                    exec(co, namespace)
                 args = (execfile, filename, mainmodule.__dict__)
             else:
                 filename = sys.argv[0]
@@ -792,7 +785,7 @@
                             code = f.read()
                         co = compile(code, filename, 'exec',
                                      PyCF_ACCEPT_NULL_BYTES)
-                        exec_(co, namespace)
+                        exec(co, namespace)
                     args = (execfile, filename, mainmodule.__dict__)
             success = run_toplevel(*args)
 
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -1043,36 +1043,6 @@
         assert data.startswith("15\\u20ac ('strict', 'backslashreplace')")
 
 
-class TestAppMain:
-    def test_print_info(self):
-        from pypy.interpreter import app_main
-        import sys, cStringIO
-        prev_so = sys.stdout
-        prev_ti = getattr(sys, 'pypy_translation_info', 'missing')
-        sys.pypy_translation_info = {
-            'translation.foo': True,
-            'translation.bar': 42,
-            'translation.egg.something': None,
-            'objspace.x': 'hello',
-        }
-        try:
-            sys.stdout = f = cStringIO.StringIO()
-            py.test.raises(SystemExit, app_main.print_info)
-        finally:
-            sys.stdout = prev_so
-            if prev_ti == 'missing':
-                del sys.pypy_translation_info
-            else:
-                sys.pypy_translation_info = prev_ti
-        assert f.getvalue() == ("[objspace]\n"
-                                "    x = 'hello'\n"
-                                "[translation]\n"
-                                "    bar = 42\n"
-                                "    [egg]\n"
-                                "        something = None\n"
-                                "    foo = True\n")
-
-
 @py.test.mark.skipif('config.getoption("runappdirect")')
 class AppTestAppMain:
     def setup_class(self):
diff --git a/pypy/module/__builtin__/state.py b/pypy/module/__builtin__/state.py
--- a/pypy/module/__builtin__/state.py
+++ b/pypy/module/__builtin__/state.py
@@ -2,8 +2,8 @@
 class State:
     def __init__(self, space):
         self.w_open = space.appexec([], """():
-                import io
-                return io.open""")
-        
+                import _io
+                return _io.open""")
+
 def get(space):
     return space.fromcache(State)
diff --git a/pypy/module/__pypy__/test/test_builders.py b/pypy/module/__pypy__/test/test_builders.py
--- a/pypy/module/__pypy__/test/test_builders.py
+++ b/pypy/module/__pypy__/test/test_builders.py
@@ -4,32 +4,32 @@
     def test_simple(self):
         from __pypy__.builders import StringBuilder
         b = StringBuilder()
-        b.append("abc")
-        b.append("123")
-        b.append("1")
+        b.append(u"abc")
+        b.append(u"123")
+        b.append(u"1")
         s = b.build()
-        assert s == "abc1231"
+        assert s == u"abc1231"
         assert b.build() == s
-        b.append("123")
-        assert b.build() == s + "123"
+        b.append(u"123")
+        assert b.build() == s + u"123"
 
     def test_preallocate(self):
         from __pypy__.builders import StringBuilder
         b = StringBuilder(10)
-        b.append("abc")
-        b.append("123")
+        b.append(u"abc")
+        b.append(u"123")
         s = b.build()
-        assert s == "abc123"
+        assert s == u"abc123"
 
     def test_append_slice(self):
         from __pypy__.builders import StringBuilder
         b = StringBuilder()
-        b.append_slice("abcdefgh", 2, 5)
-        raises(ValueError, b.append_slice, "1", 2, 1)
+        b.append_slice(u"abcdefgh", 2, 5)
+        raises(ValueError, b.append_slice, u"1", 2, 1)
         s = b.build()
-        assert s == "cde"
-        b.append_slice("abc", 1, 2)
-        assert b.build() == "cdeb"
+        assert s == u"cde"
+        b.append_slice(u"abc", 1, 2)
+        assert b.build() == u"cdeb"
 
     def test_stringbuilder(self):
         from __pypy__.builders import BytesBuilder
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -52,6 +52,7 @@
         'unpack': 'func.unpack',
         'buffer': 'cbuffer.MiniBuffer',
         'memmove': 'func.memmove',
+        'release': 'func.release',
 
         'get_errno': 'cerrno.get_errno',
         'set_errno': 'cerrno.set_errno',
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -476,6 +476,19 @@
     def get_structobj(self):
         return None
 
+    def enter_exit(self, exit_now):
+        raise oefmt(self.space.w_ValueError,
+            "only 'cdata' object from ffi.new(), ffi.gc(), ffi.from_buffer() "
+            "or ffi.new_allocator()() can be used with the 'with' keyword or "
+            "ffi.release()")
+
+    def descr_enter(self):
+        self.enter_exit(False)
+        return self
+
+    def descr_exit(self, args_w):
+        self.enter_exit(True)
+
 
 class W_CDataMem(W_CData):
     """This is used only by the results of cffi.cast('int', x)
@@ -528,14 +541,33 @@
     def get_structobj(self):
         return self
 
+    def enter_exit(self, exit_now):
+        from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray
+        if not isinstance(self.ctype, W_CTypePtrOrArray):
+            W_CData.enter_exit(self, exit_now)
+        elif exit_now:
+            self._do_exit()
+
+    def _do_exit(self):
+        raise NotImplementedError
+
 
 class W_CDataNewStd(W_CDataNewOwning):
     """Subclass using the standard allocator, lltype.malloc()/lltype.free()"""
-    _attrs_ = []
+    _attrs_ = ['explicitly_freed']
+    explicitly_freed = False
 
     @rgc.must_be_light_finalizer
     def __del__(self):
-        lltype.free(self._ptr, flavor='raw')
+        if not self.explicitly_freed:
+            lltype.free(self._ptr, flavor='raw')
+
+    def _do_exit(self):
+        if not self.explicitly_freed:
+            rgc.add_memory_pressure(-self._sizeof(), self)
+            self.explicitly_freed = True
+            rgc.may_ignore_finalizer(self)
+            lltype.free(self._ptr, flavor='raw')
 
 
 class W_CDataNewNonStd(W_CDataNewOwning):
@@ -543,7 +575,16 @@
     _attrs_ = ['w_raw_cdata', 'w_free']
 
     def _finalize_(self):
-        self.space.call_function(self.w_free, self.w_raw_cdata)
+        if self.w_free is not None:
+            self.space.call_function(self.w_free, self.w_raw_cdata)
+
+    def _do_exit(self):
+        w_free = self.w_free
+        if w_free is not None:
+            rgc.add_memory_pressure(-self._sizeof(), self)
+            self.w_free = None
+            self.may_unregister_rpython_finalizer(self.space)
+            self.space.call_function(w_free, self.w_raw_cdata)
 
 
 class W_CDataPtrToStructOrUnion(W_CData):
@@ -573,6 +614,12 @@
         else:
             return None
 
+    def enter_exit(self, exit_now):
+        if exit_now:
+            structobj = self.structobj
+            if isinstance(structobj, W_CDataNewOwning):
+                structobj._do_exit()
+
 
 class W_CDataSliced(W_CData):
     """Subclass with an explicit length, for slices."""
@@ -611,21 +658,28 @@
 
 class W_CDataFromBuffer(W_CData):
     _attrs_ = ['buf', 'length', 'w_keepalive']
-    _immutable_fields_ = ['buf', 'length', 'w_keepalive']
+    _immutable_fields_ = ['buf', 'length']
 
-    def __init__(self, space, cdata, ctype, buf, w_object):
+    def __init__(self, space, cdata, length, ctype, buf, w_object):
         W_CData.__init__(self, space, cdata, ctype)
         self.buf = buf
-        self.length = buf.getlength()
+        self.length = length
         self.w_keepalive = w_object
 
     def get_array_length(self):
         return self.length
 
     def _repr_extra(self):
-        w_repr = self.space.repr(self.w_keepalive)
-        return "buffer len %d from '%s' object" % (
-            self.length, self.space.type(self.w_keepalive).name)
+        if self.w_keepalive is not None:
+            name = self.space.type(self.w_keepalive).name
+        else:
+            name = "(released)"
+        return "buffer len %d from '%s' object" % (self.length, name)
+
+    def enter_exit(self, exit_now):
+        # for now, limited effect on PyPy
+        if exit_now:
+            self.w_keepalive = None
 
 
 class W_CDataGCP(W_CData):
@@ -640,6 +694,9 @@
         self.register_finalizer(space)
 
     def _finalize_(self):
+        self.invoke_finalizer()
+
+    def invoke_finalizer(self):
         w_destructor = self.w_destructor
         if w_destructor is not None:
             self.w_destructor = None
@@ -649,6 +706,11 @@
         self.w_destructor = None
         self.may_unregister_rpython_finalizer(self.space)
 
+    def enter_exit(self, exit_now):
+        if exit_now:
+            self.may_unregister_rpython_finalizer(self.space)
+            self.invoke_finalizer()
+
 
 W_CData.typedef = TypeDef(
     '_cffi_backend.CData',
@@ -678,5 +740,7 @@
     __iter__ = interp2app(W_CData.iter),
     __weakref__ = make_weakref_descr(W_CData),
     __dir__ = interp2app(W_CData.dir),
+    __enter__ = interp2app(W_CData.descr_enter),
+    __exit__ = interp2app(W_CData.descr_exit),
     )
 W_CData.typedef.acceptable_as_base_class = False
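
The __enter__/__exit__ methods wired onto W_CData above make cdata objects
from ffi.new(), ffi.gc(), ffi.new_allocator()() and ffi.from_buffer() usable
in a 'with' statement, with __exit__ doing the same work as ffi.release().  A
small app-level sketch, again assuming a cffi/_cffi_backend that includes
these changes:

    # Sketch of the context-manager support added above; __exit__ goes
    # through the same enter_exit() path as ffi.release().
    from cffi import FFI

    ffi = FFI()
    with ffi.new("int[]", 16) as p:     # __enter__ returns the cdata itself
        p[0] = 42
        assert p[0] == 42
    # after __exit__ the memory is released; reading p[0] may give garbage

    buf = bytearray(b"xyz")
    with ffi.from_buffer(buf) as q:
        assert q[2] == b"z"
    # __exit__ drops the keepalive reference to 'buf'
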
diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py
--- a/pypy/module/_cffi_backend/ctypearray.py
+++ b/pypy/module/_cffi_backend/ctypearray.py
@@ -25,7 +25,7 @@
         assert isinstance(ctptr, W_CTypePointer)
         W_CTypePtrOrArray.__init__(self, space, arraysize, extra, 0,
                                    ctptr.ctitem)
-        self.length = length
+        self.length = length    # -1 if no length is given, e.g. 'int[]'
         self.ctptr = ctptr
 
     def _alignof(self):
@@ -86,7 +86,7 @@
     def _check_subscript_index(self, w_cdata, i):
         space = self.space
         if i < 0:
-            raise oefmt(space.w_IndexError, "negative index not supported")
+            raise oefmt(space.w_IndexError, "negative index")
         if i >= w_cdata.get_array_length():
             raise oefmt(space.w_IndexError,
                         "index too large for cdata '%s' (expected %d < %d)",
@@ -96,7 +96,7 @@
     def _check_slice_index(self, w_cdata, start, stop):
         space = self.space
         if start < 0:
-            raise oefmt(space.w_IndexError, "negative index not supported")
+            raise oefmt(space.w_IndexError, "negative index")
         if stop > w_cdata.get_array_length():
             raise oefmt(space.w_IndexError,
                         "index too large (expected %d <= %d)",
diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py
--- a/pypy/module/_cffi_backend/ffi_obj.py
+++ b/pypy/module/_cffi_backend/ffi_obj.py
@@ -328,7 +328,8 @@
 
 
     @unwrap_spec(require_writable=int)
-    def descr_from_buffer(self, w_python_buffer, require_writable=0):
+    def descr_from_buffer(self, w_cdecl, w_python_buffer=None,
+                                require_writable=0):
         """\
 Return a <cdata 'char[]'> that points to the data of the given Python
 object, which must support the buffer interface.  Note that this is
@@ -337,9 +338,13 @@
 containing large quantities of raw data in some other format, like
 'array.array' or numpy arrays."""
         #
-        w_ctchara = newtype._new_chara_type(self.space)
-        return func._from_buffer(self.space, w_ctchara, w_python_buffer,
-                                 require_writable)
+        if w_python_buffer is None:
+            w_python_buffer = w_cdecl
+            w_ctype = newtype._new_chara_type(self.space)
+        else:
+            w_ctype = self.ffi_type(w_cdecl, ACCEPT_STRING | ACCEPT_CTYPE)
+        return func.from_buffer(self.space, w_ctype, w_python_buffer,
+                                require_writable)
 
 
     @unwrap_spec(w_arg=W_CData)
@@ -703,6 +708,16 @@
                 pass
         return w_res
 
+    @unwrap_spec(w_cdata=W_CData)
+    def descr_release(self, w_cdata):
+        """\
+Release now the resources held by a 'cdata' object from ffi.new(),
+ffi.gc() or ffi.from_buffer().  The cdata object must not be used
+afterwards.
+
+'ffi.release(cdata)' is equivalent to 'cdata.__exit__()'."""
+        w_cdata.enter_exit(True)
+
 
 class W_InitOnceLock(W_Root):
     def __init__(self, space):
@@ -777,6 +792,7 @@
         new_allocator = interp2app(W_FFIObject.descr_new_allocator),
         new_handle  = interp2app(W_FFIObject.descr_new_handle),
         offsetof    = interp2app(W_FFIObject.descr_offsetof),
+        release     = interp2app(W_FFIObject.descr_release),
         sizeof      = interp2app(W_FFIObject.descr_sizeof),
         string      = interp2app(W_FFIObject.descr_string),
         typeof      = interp2app(W_FFIObject.descr_typeof),
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -112,16 +112,10 @@
 
 @unwrap_spec(w_ctype=ctypeobj.W_CType, require_writable=int)
 def from_buffer(space, w_ctype, w_x, require_writable=0):
-    from pypy.module._cffi_backend import ctypearray, ctypeprim
-    #
-    if (not isinstance(w_ctype, ctypearray.W_CTypeArray) or
-        not isinstance(w_ctype.ctptr.ctitem, ctypeprim.W_CTypePrimitiveChar)):
-        raise oefmt(space.w_TypeError,
-                    "needs 'char[]', got '%s'", w_ctype.name)
-    #
-    return _from_buffer(space, w_ctype, w_x, require_writable)
-
-def _from_buffer(space, w_ctype, w_x, require_writable):
+    from pypy.module._cffi_backend import ctypearray
+    if not isinstance(w_ctype, ctypearray.W_CTypeArray):
+        raise oefmt(space.w_TypeError, "expected an array ctype, got '%s'",
+                    w_ctype.name)
     if space.isinstance_w(w_x, space.w_unicode):
         raise oefmt(space.w_TypeError,
                 "from_buffer() cannot return the address of a unicode object")
@@ -140,7 +134,37 @@
                         "buffer interface but cannot be rendered as a plain "
                         "raw address on PyPy", w_x)
     #
-    return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x)
+    buffersize = buf.getlength()
+    arraylength = w_ctype.length
+    if arraylength >= 0:
+        # it's an array with a fixed length; make sure that the
+        # buffer contains enough bytes.
+        if buffersize < w_ctype.size:
+            raise oefmt(space.w_ValueError,
+                "buffer is too small (%d bytes) for '%s' (%d bytes)",
+                buffersize, w_ctype.name, w_ctype.size)
+    else:
+        # it's an open 'array[]'
+        itemsize = w_ctype.ctitem.size
+        if itemsize == 1:
+            # fast path, performance only
+            arraylength = buffersize
+        elif itemsize > 0:
+            # give it as many items as fit the buffer.  Ignore a
+            # partial last element.
+            arraylength = buffersize / itemsize
+        else:
+            # it's an array 'empty[]'.  Unsupported obscure case:
+            # the problem is that setting the length of the result
+            # to anything large (like SSIZE_T_MAX) is dangerous,
+            # because if someone tries to loop over it, it will
+            # turn effectively into an infinite loop.
+            raise oefmt(space.w_ZeroDivisionError,
+                "from_buffer('%s', ..): the actual length of the array "
+                "cannot be computed", w_ctype.name)
+    #
+    return cdataobj.W_CDataFromBuffer(space, _cdata, arraylength,
+                                      w_ctype, buf, w_x)
 
 # ____________________________________________________________
 
@@ -264,3 +288,7 @@
 @unwrap_spec(w_cdata=cdataobj.W_CData, size=int)
 def gcp(space, w_cdata, w_destructor, size=0):
     return w_cdata.with_gc(w_destructor, size)
+
+@unwrap_spec(w_cdata=cdataobj.W_CData)
+def release(space, w_cdata):
+    w_cdata.enter_exit(True)
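
The array-length computation added to from_buffer() above can be restated in
pure Python as follows.  This is illustrative only; array_length_for_buffer is
a hypothetical helper, not part of _cffi_backend:

    # Pure-Python restatement of the interp-level logic above.
    def array_length_for_buffer(declared_length, buffersize, itemsize, ctype_size):
        if declared_length >= 0:
            # fixed-length array such as 'int[2]': the buffer must be big enough
            if buffersize < ctype_size:
                raise ValueError("buffer is too small (%d bytes) for the array "
                                 "(%d bytes)" % (buffersize, ctype_size))
            return declared_length
        if itemsize == 0:
            # open array of zero-sized items, e.g. 'struct empty[]'
            raise ZeroDivisionError("the actual length of the array "
                                    "cannot be computed")
        # open array such as 'int[]': as many whole items as fit in the
        # buffer; a partial last element is ignored
        return buffersize // itemsize

    assert array_length_for_buffer(-1, 8, 2, -1) == 4   # 'unsigned short[]', 8 bytes
    assert array_length_for_buffer(-1, 9, 4, -1) == 2   # partial last int ignored
    assert array_length_for_buffer(2, 12, 4, 8) == 2    # 'int[2]' over 12 bytes
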
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -3742,6 +3742,64 @@
     p1[0] = b"g"
     assert ba == b"goo"
 
+def test_from_buffer_types():
+    BInt = new_primitive_type("int")
+    BIntP = new_pointer_type(BInt)
+    BIntA = new_array_type(BIntP, None)
+    lst = [-12345678, 87654321, 489148]
+    bytestring = buffer(newp(BIntA, lst))[:] + b'XYZ'
+    #
+    p1 = from_buffer(BIntA, bytestring)      # int[]
+    assert typeof(p1) is BIntA
+    assert len(p1) == 3
+    assert p1[0] == lst[0]
+    assert p1[1] == lst[1]
+    assert p1[2] == lst[2]
+    py.test.raises(IndexError, "p1[3]")
+    py.test.raises(IndexError, "p1[-1]")
+    #
+    py.test.raises(TypeError, from_buffer, BInt, bytestring)
+    py.test.raises(TypeError, from_buffer, BIntP, bytestring)
+    #
+    BIntA2 = new_array_type(BIntP, 2)
+    p2 = from_buffer(BIntA2, bytestring)     # int[2]
+    assert typeof(p2) is BIntA2
+    assert len(p2) == 2
+    assert p2[0] == lst[0]
+    assert p2[1] == lst[1]
+    py.test.raises(IndexError, "p2[2]")
+    py.test.raises(IndexError, "p2[-1]")
+    assert p2 == p1
+    #
+    BIntA4 = new_array_type(BIntP, 4)        # int[4]: too big
+    py.test.raises(ValueError, from_buffer, BIntA4, bytestring)
+    #
+    BStruct = new_struct_type("foo")
+    complete_struct_or_union(BStruct, [('a1', BInt, -1),
+                                       ('a2', BInt, -1)])
+    BStructP = new_pointer_type(BStruct)
+    BStructA = new_array_type(BStructP, None)
+    p1 = from_buffer(BStructA, bytestring)   # struct[]
+    assert len(p1) == 1
+    assert typeof(p1) is BStructA
+    assert p1[0].a1 == lst[0]
+    assert p1[0].a2 == lst[1]
+    py.test.raises(IndexError, "p1[1]")
+    #
+    BEmptyStruct = new_struct_type("empty")
+    complete_struct_or_union(BEmptyStruct, [], Ellipsis, 0)
+    assert sizeof(BEmptyStruct) == 0
+    BEmptyStructP = new_pointer_type(BEmptyStruct)
+    BEmptyStructA = new_array_type(BEmptyStructP, None)
+    py.test.raises(ZeroDivisionError, from_buffer,      # empty[]
+                                      BEmptyStructA, bytestring)
+    #
+    BEmptyStructA5 = new_array_type(BEmptyStructP, 5)
+    p1 = from_buffer(BEmptyStructA5, bytestring)   # struct empty[5]
+    assert typeof(p1) is BEmptyStructA5
+    assert len(p1) == 5
+    assert cast(BIntP, p1) == from_buffer(BIntA, bytestring)
+
 def test_memmove():
     Short = new_primitive_type("short")
     ShortA = new_array_type(new_pointer_type(Short), None)
@@ -4074,3 +4132,114 @@
     assert_eq(cast(t5, 7.0), cast(t3, 7))
     assert_lt(cast(t5, 3.1), 3.101)
     assert_gt(cast(t5, 3.1), 3)
+
+def test_explicit_release_new():
+    # release() on a ffi.new() object has no effect on CPython, but
+    # really releases memory on PyPy.  We can't test that effect
+    # though, because a released cdata is not marked.
+    BIntP = new_pointer_type(new_primitive_type("int"))
+    p = newp(BIntP)
+    p[0] = 42
+    py.test.raises(IndexError, "p[1]")
+    release(p)
+    # here, reading p[0] might give garbage or segfault...
+    release(p)   # no effect
+    #
+    BStruct = new_struct_type("struct foo")
+    BStructP = new_pointer_type(BStruct)
+    complete_struct_or_union(BStruct, [('p', BIntP, -1)])
+    pstruct = newp(BStructP)
+    assert pstruct.p == cast(BIntP, 0)
+    release(pstruct)
+    # here, reading pstruct.p might give garbage or segfault...
+    release(pstruct)   # no effect
+
+def test_explicit_release_new_contextmgr():
+    BIntP = new_pointer_type(new_primitive_type("int"))
+    with newp(BIntP) as p:
+        p[0] = 42
+        assert p[0] == 42
+    # here, reading p[0] might give garbage or segfault...
+    release(p)   # no effect
+
+def test_explicit_release_badtype():
+    BIntP = new_pointer_type(new_primitive_type("int"))
+    p = cast(BIntP, 12345)
+    py.test.raises(ValueError, release, p)
+    py.test.raises(ValueError, release, p)
+    BStruct = new_struct_type("struct foo")
+    BStructP = new_pointer_type(BStruct)
+    complete_struct_or_union(BStruct, [('p', BIntP, -1)])
+    pstruct = newp(BStructP)
+    py.test.raises(ValueError, release, pstruct[0])
+
+def test_explicit_release_badtype_contextmgr():
+    BIntP = new_pointer_type(new_primitive_type("int"))
+    p = cast(BIntP, 12345)
+    py.test.raises(ValueError, "with p: pass")
+    py.test.raises(ValueError, "with p: pass")
+
+def test_explicit_release_gc():
+    BIntP = new_pointer_type(new_primitive_type("int"))
+    seen = []
+    intp1 = newp(BIntP, 12345)
+    p1 = cast(BIntP, intp1)
+    p = gcp(p1, seen.append)
+    assert seen == []
+    release(p)
+    assert seen == [p1]
+    assert p1[0] == 12345
+    assert p[0] == 12345  # true so far, but might change to raise RuntimeError
+    release(p)   # no effect
+
+def test_explicit_release_gc_contextmgr():
+    BIntP = new_pointer_type(new_primitive_type("int"))
+    seen = []
+    intp1 = newp(BIntP, 12345)
+    p1 = cast(BIntP, intp1)
+    p = gcp(p1, seen.append)
+    with p:
+        assert p[0] == 12345
+        assert seen == []
+    assert seen == [p1]
+    assert p1[0] == 12345
+    assert p[0] == 12345  # true so far, but might change to raise RuntimeError
+    release(p)   # no effect
+
+def test_explicit_release_from_buffer():
+    a = bytearray(b"xyz")
+    BChar = new_primitive_type("char")
+    BCharP = new_pointer_type(BChar)
+    BCharA = new_array_type(BCharP, None)
+    p = from_buffer(BCharA, a)
+    assert p[2] == b"z"
+    release(p)
+    assert p[2] == b"z"  # true so far, but might change to raise RuntimeError
+    release(p)   # no effect
+
+def test_explicit_release_from_buffer_contextmgr():
+    a = bytearray(b"xyz")
+    BChar = new_primitive_type("char")
+    BCharP = new_pointer_type(BChar)
+    BCharA = new_array_type(BCharP, None)
+    p = from_buffer(BCharA, a)
+    with p:
+        assert p[2] == b"z"
+    assert p[2] == b"z"  # true so far, but might change to raise RuntimeError
+    release(p)   # no effect
+
+def test_explicit_release_bytearray_on_cpython():
+    if '__pypy__' in sys.builtin_module_names:
+        py.test.skip("pypy's bytearray are never locked")
+    a = bytearray(b"xyz")
+    BChar = new_primitive_type("char")
+    BCharP = new_pointer_type(BChar)
+    BCharA = new_array_type(BCharP, None)
+    a += b't' * 10
+    p = from_buffer(BCharA, a)
+    py.test.raises(BufferError, "a += b'u' * 100")
+    release(p)
+    a += b'v' * 100
+    release(p)   # no effect
+    a += b'w' * 1000
+    assert a == bytearray(b"xyz" + b't' * 10 + b'v' * 100 + b'w' * 1000)
diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py
--- a/pypy/module/_cffi_backend/test/test_ffi_obj.py
+++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py
@@ -282,19 +282,31 @@
         import _cffi_backend as _cffi1_backend
         import array
         ffi = _cffi1_backend.FFI()
-        a = array.array('H', [10000, 20000, 30000])
+        a = array.array('H', [10000, 20000, 30000, 40000])
         c = ffi.from_buffer(a)
         assert ffi.typeof(c) is ffi.typeof("char[]")
+        assert len(c) == 8
         ffi.cast("unsigned short *", c)[1] += 500
-        assert list(a) == [10000, 20500, 30000]
-        assert c == ffi.from_buffer(a, True)
+        assert list(a) == [10000, 20500, 30000, 40000]
+        raises(TypeError, ffi.from_buffer, a, True)
+        assert c == ffi.from_buffer("char[]", a, True)
         assert c == ffi.from_buffer(a, require_writable=True)
         #
+        c = ffi.from_buffer("unsigned short[]", a)
+        assert len(c) == 4
+        assert c[1] == 20500
+        #
+        c = ffi.from_buffer("unsigned short[2][2]", a)
+        assert len(c) == 2
+        assert len(c[0]) == 2
+        assert c[0][1] == 20500
+        #
         p = ffi.from_buffer(b"abcd")
         assert p[2] == b"c"
         #
-        assert p == ffi.from_buffer(b"abcd", False)
-        raises((TypeError, BufferError), ffi.from_buffer, b"abcd", True)
+        assert p == ffi.from_buffer(b"abcd", require_writable=False)
+        raises((TypeError, BufferError), ffi.from_buffer,
+                                         "char[]", b"abcd", True)
         raises((TypeError, BufferError), ffi.from_buffer, b"abcd",
                                          require_writable=True)
 
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -2108,3 +2108,36 @@
         else:
             assert lib.__loader__ is None
             assert lib.__spec__ is None
+
+    def test_release(self):
+        ffi, lib = self.prepare("", "test_release", "")
+        p = ffi.new("int[]", 123)
+        ffi.release(p)
+        # here, reading p[0] might give garbage or segfault...
+        ffi.release(p)   # no effect
+
+    def test_release_new_allocator(self):
+        ffi, lib = self.prepare("struct ab { int a, b; };",
+                                "test_release_new_allocator",
+                                "struct ab { int a, b; };")
+        seen = []
+        def myalloc(size):
+            seen.append(size)
+            return ffi.new("char[]", b"X" * size)
+        def myfree(raw):
+            seen.append(raw)
+        alloc2 = ffi.new_allocator(alloc=myalloc, free=myfree)
+        p = alloc2("int[]", 15)
+        assert seen == [15 * 4]
+        ffi.release(p)
+        assert seen == [15 * 4, p]
+        ffi.release(p)    # no effect
+        assert seen == [15 * 4, p]
+        #
+        del seen[:]
+        p = alloc2("struct ab *")
+        assert seen == [2 * 4]
+        ffi.release(p)
+        assert seen == [2 * 4, p]
+        ffi.release(p)    # no effect
+        assert seen == [2 * 4, p]
diff --git a/pypy/module/_collections/test/test_ordereddict.py b/pypy/module/_collections/test/test_ordereddict.py
--- a/pypy/module/_collections/test/test_ordereddict.py
+++ b/pypy/module/_collections/test/test_ordereddict.py
@@ -22,3 +22,17 @@
         assert d['x'] == 42
         d.update({'y': 2})
         assert d['y'] == 42
+
+    def test_reversed(self):
+        import sys
+        from _collections import OrderedDict
+
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        od = OrderedDict(pairs)
+        if '__pypy__' in sys.builtin_module_names:
+            # dict ordering is wrong when testing interpreted on top of CPython
+            pairs = list(dict(od).items())
+        assert list(reversed(od)) == [t[0] for t in reversed(pairs)]
+        assert list(reversed(od.keys())) == [t[0] for t in reversed(pairs)]
+        assert list(reversed(od.values())) == [t[1] for t in reversed(pairs)]
+        assert list(reversed(od.items())) == list(reversed(pairs))
diff --git a/pypy/module/_cppyy/pythonify.py b/pypy/module/_cppyy/pythonify.py
--- a/pypy/module/_cppyy/pythonify.py
+++ b/pypy/module/_cppyy/pythonify.py
@@ -1,8 +1,10 @@
 # NOT_RPYTHON
 # do not load _cppyy here, see _post_import_startup()
-import types
 import sys
 
+class _C:
+    def _m(self): pass
+MethodType = type(_C()._m)
 
 # Metaclasses are needed to store C++ static data members as properties and to
 # provide Python language features such as a customized __dir__ for namespaces
@@ -238,7 +240,7 @@
     # prepare dictionary for python-side C++ class representation
     def dispatch(self, m_name, signature):
         cppol = decl.__dispatch__(m_name, signature)
-        return types.MethodType(cppol, self, type(self))
+        return MethodType(cppol, self, type(self))
     d_class = {"__cppdecl__"   : decl,
          "__new__"      : make_new(decl),
          "__module__"   : make_module_name(scope),
diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py
--- a/pypy/module/_sre/interp_sre.py
+++ b/pypy/module/_sre/interp_sre.py
@@ -170,60 +170,48 @@
             return False
         return space.isinstance_w(self.w_pattern, space.w_unicode)
 
-    def getstring(self, w_string):
-        """Accepts a string-like object (str, bytes, bytearray, buffer...)
-        and returns a tuple (len, rpython_unicode, rpython_str, rpython_buf),
-        where only one of the rpython_xxx is non-None.
-        """
-        unicodestr = None
-        string = None
-        buf = None
-        space = self.space
-        if space.isinstance_w(w_string, space.w_unicode):
-            unicodestr = space.unicode_w(w_string)
-            length = len(unicodestr)
-        elif space.isinstance_w(w_string, space.w_bytes):
-            string = space.bytes_w(w_string)
-            length = len(string)
-        else:
-            buf = space.readbuf_w(w_string)
-            length = buf.getlength()
-            assert length >= 0
-        return (length, unicodestr, string, buf)
-
-    def make_ctx(self, w_string, pos=0, endpos=sys.maxint, flags=0):
+    def make_ctx(self, w_string, pos=0, endpos=sys.maxint):
         """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for
         searching in the given w_string object."""
         space = self.space
-        length, unicodestr, string, buf = self.getstring(w_string)
         if pos < 0:
             pos = 0
-        elif pos > length:
-            pos = length
         if endpos < pos:
             endpos = pos
-        elif endpos > length:
-            endpos = length
-        flags = self.flags | flags
-        #
-        if unicodestr is not None:
+        if space.isinstance_w(w_string, space.w_unicode):
             if self.is_known_bytes():
                 raise oefmt(space.w_TypeError,
                             "can't use a bytes pattern on a string-like "
                             "object")
-            return rsre_core.UnicodeMatchContext(unicodestr,
-                                                 pos, endpos, flags)
+            unicodestr = space.unicode_w(w_string)
+            length = len(unicodestr)
+            if pos > length:
+                pos = length
+            if endpos > length:
+                endpos = length
+            return rsre_core.UnicodeMatchContext(
+                unicodestr, pos, endpos, self.flags)
+        elif self.is_known_unicode():
+            raise oefmt(space.w_TypeError,
+                        "can't use a string pattern on a bytes-like "
+                        "object")
+        elif space.isinstance_w(w_string, space.w_bytes):
+            string = space.bytes_w(w_string)
+            length = len(string)
+            if pos > length:
+                pos = length
+            if endpos > length:
+                endpos = length
+            return rsre_core.StrMatchContext(string, pos, endpos, self.flags)
         else:
-            if self.is_known_unicode():
-                raise oefmt(space.w_TypeError,
-                            "can't use a string pattern on a bytes-like "
-                            "object")
-            if string is not None:
-                return rsre_core.StrMatchContext(string,
-                                                 pos, endpos, flags)
-            else:
-                return rsre_core.BufMatchContext(buf,
-                                                 pos, endpos, flags)
+            buf = space.readbuf_w(w_string)
+            size = buf.getlength()
+            assert size >= 0
+            if pos > size:
+                pos = size
+            if endpos > size:
+                endpos = size
+            return rsre_core.BufMatchContext(buf, pos, endpos, self.flags)
 
     def getmatch(self, ctx, found):
         if found:
@@ -336,20 +324,23 @@
         # w_string are both string or both unicode objects, and if w_ptemplate
         # is a literal
         use_builder = False
+        is_buffer = False
         filter_as_unicode = filter_as_string = None
         if space.is_true(space.callable(w_ptemplate)):
             w_filter = w_ptemplate
             filter_is_callable = True
         else:
-            length, filter_as_unicode, filter_as_string, buf = (
-                self.getstring(w_ptemplate))
-            if filter_as_unicode is not None:
+            if space.isinstance_w(w_ptemplate, space.w_unicode):
+                filter_as_unicode = space.unicode_w(w_ptemplate)
                 literal = u'\\' not in filter_as_unicode
                 use_builder = (
                     space.isinstance_w(w_string, space.w_unicode) and literal)
             else:
-                if buf is not None:
-                    filter_as_string = buf.as_str()
+                if space.isinstance_w(w_ptemplate, space.w_bytes):
+                    filter_as_string = space.bytes_w(w_ptemplate)
+                else:
+                    filter_as_string = space.readbuf_w(w_ptemplate).as_str()
+                    is_buffer = True
                 literal = '\\' not in filter_as_string
                 use_builder = (
                     space.isinstance_w(w_string, space.w_bytes) and literal)
@@ -360,7 +351,7 @@
                 # not a literal; hand it over to the template compiler
                 # FIX for a CPython 3.5 bug: if w_ptemplate is a buffer
                 # (e.g. a bytearray), convert it to a byte string here.
-                if buf is not None:
+                if is_buffer:
                     w_ptemplate = space.newbytes(filter_as_string)
                 w_re = import_re(space)
                 w_filter = space.call_method(w_re, '_subx',
diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -28,10 +28,12 @@
 /* Version as a string */
 #define PY_VERSION		"3.6.1"
 
-/* PyPy version as a string */
-#define PYPY_VERSION "6.1.0-alpha0"
-#define PYPY_VERSION_NUM  0x06010000
-
+/* PyPy version as a string: make sure to keep this in sync with:
+ *     module/sys/version.py
+ *     doc/conf.py
+ */
+#define PYPY_VERSION "7.1.0-alpha0"
+#define PYPY_VERSION_NUM  0x07010000
 /* Defined to mean a PyPy where cpyext holds more regular references
    to PyObjects, e.g. staying alive as long as the internal PyPy object
    stays alive. */
diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py
--- a/pypy/module/gc/__init__.py
+++ b/pypy/module/gc/__init__.py
@@ -4,7 +4,6 @@
 class Module(MixedModule):
     interpleveldefs = {
         'collect': 'interp_gc.collect',
-        'collect_step': 'interp_gc.collect_step',
         'enable': 'interp_gc.enable',
         'disable': 'interp_gc.disable',
         'isenabled': 'interp_gc.isenabled',
@@ -23,6 +22,7 @@
                 'get_stats': 'app_referents.get_stats',
                 })
             self.interpleveldefs.update({
+                'collect_step': 'interp_gc.collect_step',
                 'get_rpy_roots': 'referents.get_rpy_roots',
                 'get_rpy_referents': 'referents.get_rpy_referents',
                 'get_rpy_memory_usage': 'referents.get_rpy_memory_usage',
diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py
--- a/pypy/module/pypyjit/test_pypy_c/test_containers.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py
@@ -20,7 +20,7 @@
         assert log.result % 1000 == 0
         loop, = log.loops_by_filename(self.filepath)
         ops = loop.ops_by_id('look')
-        assert log.opnames(ops) == []
+        assert log.opnames(ops) == ['guard_nonnull_class']
 
     def test_identitydict(self):
         def fn(n):
diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py
--- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py
@@ -407,6 +407,7 @@
         i138 = call_i(ConstClass(_ll_1_raw_malloc_varsize_zero__Signed), 6, descr=...)
         check_memory_error(i138)
         setfield_gc(p132, i138, descr=...)
+        setfield_gc(p132, 0, descr=...)
         setfield_gc(p132, ConstPtr(ptr139), descr=...)
         setfield_gc(p132, -1, descr=...)
         setfield_gc(p0, p133, descr=...)
diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
--- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
@@ -244,6 +244,8 @@
             f80 = raw_load_f(i67, i79, descr=<ArrayF 8>)
             i81 = int_add(i71, 1)
             --TICK--
+            i92 = int_le(i33, _)
+            guard_true(i92, descr=...)
             jump(..., descr=...)
         """)
 
@@ -283,6 +285,8 @@
             f86 = float_add(f74, f85)
             i87 = int_add(i76, 1)
             --TICK--
+            i98 = int_le(i36, _)
+            guard_true(i98, descr=...)
             jump(..., descr=...)
         """)
 
@@ -390,6 +394,8 @@
         assert log.result == [0.] * N
         loop, = log.loops_by_filename(self.filepath)
         assert loop.match("""
+            i4 = int_lt(i91, 0)
+            guard_false(i4, descr=...)
             i92 = int_ge(i91, i37)
             guard_false(i92, descr=...)
             i93 = int_add(i91, 1)
diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py
--- a/pypy/module/pypyjit/test_pypy_c/test_misc.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py
@@ -113,6 +113,7 @@
             i12 = int_is_true(i4)
             guard_true(i12, descr=...)
             guard_not_invalidated(descr=...)
+            guard_nonnull_class(p10, ConstClass(W_IntObject), descr=...)
             i10p = getfield_gc_i(p10, descr=...)
             i10 = int_mul_ovf(2, i10p)
             guard_no_overflow(descr=...)
@@ -146,12 +147,16 @@
     RANGE_ITER_STEP_1 = """
             guard_not_invalidated?
             # W_IntRangeStepOneIterator.next()
+            i80 = int_lt(i11, 0)
+            guard_false(i80, descr=...)
             i16 = int_lt(i11, i12)
             guard_true(i16, descr=...)
             i20 = int_add(i11, 1)
             setfield_gc(p4, i20, descr=<.* .*W_IntRangeIterator.inst_current .*>)
             guard_not_invalidated?
             i21 = force_token()
+            i89 = int_lt(0, i9)
+            guard_true(i89, descr=...)
             i88 = int_sub(i9, 1)
 
             # Compared with pypy2, we get these two operations extra.
diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
--- a/pypy/module/sys/version.py
+++ b/pypy/module/sys/version.py
@@ -10,7 +10,10 @@
 #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py
 CPYTHON_API_VERSION        = 1013   #XXX # sync with include/modsupport.h
 
-PYPY_VERSION               = (6, 1, 0, "alpha", 0)    #XXX # sync patchlevel.h
+# make sure to keep PYPY_VERSION in sync with:
+#    module/cpyext/include/patchlevel.h
+#    doc/conf.py
+PYPY_VERSION               = (7, 1, 0, "alpha", 0)
 
 
 import pypy
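
On a PyPy built from this branch the same tuple is visible at run time as sys.pypy_version_info, which is a convenient cross-check that patchlevel.h and this file were bumped together:

    import sys

    # PyPy-only attribute; the leading components mirror PYPY_VERSION above.
    if hasattr(sys, 'pypy_version_info'):
        assert tuple(sys.pypy_version_info[:3]) == (7, 1, 0)
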
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -11,7 +11,7 @@
     WrappedDefault, applevel, interp2app, unwrap_spec)
 from pypy.interpreter.mixedmodule import MixedModule
 from pypy.interpreter.signature import Signature
-from pypy.interpreter.typedef import TypeDef
+from pypy.interpreter.typedef import TypeDef, interp_attrproperty_w
 from pypy.interpreter.unicodehelper import decode_utf8
 from pypy.objspace.std.util import negate
 
@@ -1538,6 +1538,12 @@
     descr_or, descr_ror = _as_set_op('or', 'update')
     descr_xor, descr_rxor = _as_set_op('xor', 'symmetric_difference_update')
 
+def new_dict_items(space, w_type, w_dict):
+    w_dict = space.interp_w(W_DictMultiObject, w_dict)
+    w_obj = space.allocate_instance(W_DictViewItemsObject, w_type)
+    W_DictViewObject.__init__(w_obj, space, w_dict)
+    return w_obj
+
 class W_DictViewItemsObject(W_DictViewObject, SetLikeDictView):
     def descr_iter(self, space):
         return W_DictMultiIterItemsObject(space, self.w_dict.iteritems())
@@ -1557,18 +1563,32 @@
             return space.w_False
         return space.newbool(space.eq_w(w_value, w_found))
 
+def new_dict_keys(space, w_type, w_dict):
+    w_dict = space.interp_w(W_DictMultiObject, w_dict)
+    w_obj = space.allocate_instance(W_DictViewKeysObject, w_type)
+    W_DictViewObject.__init__(w_obj, space, w_dict)
+    return w_obj
+
 class W_DictViewKeysObject(W_DictViewObject, SetLikeDictView):
     def descr_iter(self, space):
         return W_DictMultiIterKeysObject(space, self.w_dict.iterkeys())
+
     def descr_contains(self, space, w_key):
         return self.w_dict.descr_contains(space, w_key)
 
+def new_dict_values(space, w_type, w_dict):
+    w_dict = space.interp_w(W_DictMultiObject, w_dict)
+    w_obj = space.allocate_instance(W_DictViewValuesObject, w_type)
+    W_DictViewObject.__init__(w_obj, space, w_dict)
+    return w_obj
+
 class W_DictViewValuesObject(W_DictViewObject):
     def descr_iter(self, space):
         return W_DictMultiIterValuesObject(space, self.w_dict.itervalues())
 
 W_DictViewItemsObject.typedef = TypeDef(
     "dict_items",
+    __new__ = interp2app(new_dict_items),
     __repr__ = interp2app(W_DictViewItemsObject.descr_repr),
     __len__ = interp2app(W_DictViewItemsObject.descr_len),
     __iter__ = interp2app(W_DictViewItemsObject.descr_iter),
@@ -1590,10 +1610,12 @@
     __xor__ = interp2app(W_DictViewItemsObject.descr_xor),
     __rxor__ = interp2app(W_DictViewItemsObject.descr_rxor),
     isdisjoint = interp2app(W_DictViewItemsObject.descr_isdisjoint),
+    _dict = interp_attrproperty_w('w_dict', cls=W_DictViewItemsObject),
     )
 
 W_DictViewKeysObject.typedef = TypeDef(
     "dict_keys",
+    __new__ = interp2app(new_dict_keys),
     __repr__ = interp2app(W_DictViewKeysObject.descr_repr),
     __len__ = interp2app(W_DictViewKeysObject.descr_len),
     __iter__ = interp2app(W_DictViewKeysObject.descr_iter),
@@ -1615,11 +1637,14 @@
     __xor__ = interp2app(W_DictViewKeysObject.descr_xor),
     __rxor__ = interp2app(W_DictViewKeysObject.descr_rxor),
     isdisjoint = interp2app(W_DictViewKeysObject.descr_isdisjoint),
+    _dict = interp_attrproperty_w('w_dict', cls=W_DictViewKeysObject),
     )
 
 W_DictViewValuesObject.typedef = TypeDef(
     "dict_values",
+    __new__ = interp2app(new_dict_values),
     __repr__ = interp2app(W_DictViewValuesObject.descr_repr),
     __len__ = interp2app(W_DictViewValuesObject.descr_len),
     __iter__ = interp2app(W_DictViewValuesObject.descr_iter),
+    _dict = interp_attrproperty_w('w_dict', cls=W_DictViewValuesObject),
     )
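
With __new__ defined, the dict view types become constructible from a dict at app level, and each view exposes its backing dict as _dict; the test below that asserted the constructors raise TypeError is removed accordingly. A sketch of the resulting behaviour on this PyPy branch (CPython still rejects these calls):

    d = {'a': 1, 'b': 2}
    dict_keys = type({}.keys())

    view = dict_keys(d)               # TypeError on CPython, allowed here
    assert sorted(view) == ['a', 'b']
    assert view._dict == d            # the new _dict attribute
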
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py
--- a/pypy/objspace/std/test/test_dictmultiobject.py
+++ b/pypy/objspace/std/test/test_dictmultiobject.py
@@ -787,17 +787,6 @@
         assert len(d.items()) == 2
         assert len(d.values()) == 2
 
-    def test_constructors_not_callable(self):
-        kt = type({}.keys())
-        raises(TypeError, kt, {})
-        raises(TypeError, kt)
-        it = type({}.items())
-        raises(TypeError, it, {})
-        raises(TypeError, it)
-        vt = type({}.values())
-        raises(TypeError, vt, {})
-        raises(TypeError, vt)
-
     def test_dict_keys(self):
         d = {1: 10, "a": "ABC"}
         keys = d.keys()
@@ -1177,7 +1166,6 @@
         assert type(list(d.keys())[0]) is str
 
     def test_empty_to_int(self):
-        skip('IntDictStrategy is disabled for now, re-enable it!')
         import sys
         d = {}
         d[1] = "hi"
diff --git a/pypy/testrunner_cfg.py b/pypy/testrunner_cfg.py
--- a/pypy/testrunner_cfg.py
+++ b/pypy/testrunner_cfg.py
@@ -6,6 +6,8 @@
     'memory/test', 'jit/metainterp',
     'jit/backend/arm', 'jit/backend/x86',
     'jit/backend/zarch', 'module/cpyext/test',
+    # python3 slowness ...
+    'module/_cffi_backend/test', 'module/__pypy__/test',
 ]
 
 def collect_one_testdir(testdirs, reldir, tests):
diff --git a/rpython/conftest.py b/rpython/conftest.py
--- a/rpython/conftest.py
+++ b/rpython/conftest.py
@@ -5,6 +5,18 @@
 
 option = None
 
+try:
+    from hypothesis import settings, __version__
+except ImportError:
+    pass
+else:
+    if __version__[:2] < '3.6':
+        s = settings(deadline=None)
+        settings.register_profile('default', s)
+    else:
+        settings.register_profile('default', deadline=None)
+    settings.load_profile('default')
+
 def braindead_deindent(self):
     """monkeypatch that wont end up doing stupid in the python tokenizer"""
     text = '\n'.join(self.lines)
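
The new conftest block registers a 'default' hypothesis profile with the per-example deadline disabled, falling back to passing a settings object on releases that predate keyword registration. The same mechanism works for extra profiles; a minimal sketch using only documented hypothesis API (the 'ci' profile and its values are illustrative):

    from hypothesis import settings

    # Register an additional profile next to 'default'.
    settings.register_profile('ci', deadline=None, max_examples=200)

    # Activate it explicitly, or select it per run with
    # pytest's --hypothesis-profile=ci option.
    settings.load_profile('ci')
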
diff --git a/rpython/jit/metainterp/test/test_zvector.py b/rpython/jit/metainterp/test/test_zvector.py
--- a/rpython/jit/metainterp/test/test_zvector.py
+++ b/rpython/jit/metainterp/test/test_zvector.py
@@ -81,56 +81,41 @@
         if not self.supports_vector_ext():
             py.test.skip("this cpu %s has no implemented vector backend" % CPU)
 
-    def meta_interp(self, f, args, policy=None, vec=True, vec_all=False):
-        return ll_meta_interp(f, args, enable_opts=self.enable_opts,
-                              policy=policy,
-                              CPUClass=self.CPUClass,
-                              type_system=self.type_system,
-                              vec=vec, vec_all=vec_all)
-
     # FLOAT UNARY
 
-    def _vector_float_unary(self, func, type, data):
+    @pytest.mark.parametrize('func',
+        [lambda v: abs(v), lambda v: -v],
+        ids=['abs', 'neg'])
+    @given(la=st.lists(st.floats(), min_size=10, max_size=150))
+    def test_vector_float_unary(self, func, la):
         func = always_inline(func)
+        tp = rffi.DOUBLE
 
-        size = rffi.sizeof(type)
-        myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
+        size = rffi.sizeof(tp)
+        myjitdriver = JitDriver(greens=[], reds='auto', vectorize=True)
+
         def f(bytecount, va, vc):
             i = 0
             while i < bytecount:
                 myjitdriver.jit_merge_point()
-                a = raw_storage_getitem(type,va,i)
+                a = raw_storage_getitem(tp, va, i)
                 c = func(a)
-                raw_storage_setitem(vc, i, rffi.cast(type,c))
+                raw_storage_setitem(vc, i, rffi.cast(tp, c))
                 i += size
 
-        la = data.draw(st.lists(st.floats(), min_size=10, max_size=150))
         l = len(la)
-
         rawstorage = RawStorage()
-        va = rawstorage.new(la, type)
-        vc = rawstorage.new(None, type, size=l)
-        self.meta_interp(f, [l*size, va, vc], vec=True)
+        va = rawstorage.new(la, tp)
+        vc = rawstorage.new(None, tp, size=l)
+        self.meta_interp(f, [l * size, va, vc], vec=True)
 
         for i in range(l):
-            c = raw_storage_getitem(type,vc,i*size)
+            c = raw_storage_getitem(tp, vc, i * size)
             r = func(la[i])
             assert isclose(r, c)
 
         rawstorage.clear()
 
-    def vec_int_unary(test_func, unary_func, type):
-        return pytest.mark.parametrize('func,type', [
-            (unary_func, type)
-        ])(given(data=st.data())(test_func))
-
-    vec_float_unary = functools.partial(vec_int_unary, _vector_float_unary)
-
-    test_vec_float_abs = \
-            vec_float_unary(lambda v: abs(v), rffi.DOUBLE)
-    test_vec_float_neg = \
-            vec_float_unary(lambda v: -v, rffi.DOUBLE)
-
     # FLOAT BINARY
 
     def _vector_simple_float(self, func, type, data):
@@ -376,38 +361,37 @@
         res = self.meta_interp(f, [count], vec=True)
         assert res == f(count) == breaks
 
-    def _vec_reduce(self, strat, func, type, data):
-        func = always_inline(func)
+    def vec_reduce(strat, arith_func, tp):
+        @pytest.mark.parametrize('func, tp', [
+            (arith_func, tp)
+        ])
+        @given(la=st.lists(strat, min_size=11, max_size=150))
+        def _vec_reduce(self, func, tp, la):
+            func = always_inline(func)
 
-        size = rffi.sizeof(type)
-        myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
-        def f(accum, bytecount, v):
-            i = 0
-            while i < bytecount:
-                myjitdriver.jit_merge_point()
-                e = raw_storage_getitem(type,v,i)
-                accum = func(accum,e)
-                i += size
-            return accum
+            size = rffi.sizeof(tp)
+            myjitdriver = JitDriver(greens=[], reds='auto', vectorize=True)
 
-        la = data.draw(st.lists(strat, min_size=10, max_size=150))
-        #la = [1.0] * 10
-        l = len(la)
+            def f(accum, bytecount, v):
+                i = 0
+                while i < bytecount:
+                    myjitdriver.jit_merge_point()
+                    e = raw_storage_getitem(tp, v, i)
+                    accum = func(accum, e)
+                    i += size
+                return accum
 
-        accum = data.draw(strat)
-        rawstorage = RawStorage()
-        va = rawstorage.new(la, type)
-        res = self.meta_interp(f, [accum, l*size, va], vec=True)
+            accum = la[0]
+            la = la[1:]
+            l = len(la)
+            rawstorage = RawStorage()
+            va = rawstorage.new(la, tp)
+            res = self.meta_interp(f, [accum, l * size, va], vec=True)
 
-        assert isclose(rffi.cast(type, res), f(accum, l*size, va))
+            assert isclose(rffi.cast(tp, res), f(accum, l * size, va))
 
-        rawstorage.clear()
-
-    def vec_reduce(test_func, strat, arith_func, type):
-        return pytest.mark.parametrize('strat,func,type', [
-            (strat, arith_func, type)
-        ])(given(data=st.data())(test_func))
-    vec_reduce = functools.partial(vec_reduce, _vec_reduce)
+            rawstorage.clear()
+        return _vec_reduce
 
     test_vec_int_sum = vec_reduce(st.integers(min_value=-2**(64-1), max_value=2**(64-1)-1),
                              lambda a,b: lltype.intmask(lltype.intmask(a)+lltype.intmask(b)), lltype.Signed)
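
The rewritten tests drop the data.draw(...) indirection and the custom meta_interp override, declaring strategies directly in @given and stacking it under @pytest.mark.parametrize so every parametrized case still gets its own hypothesis exploration. A standalone sketch of that decorator pattern (names and strategies are illustrative):

    import pytest
    from hypothesis import given, strategies as st

    @pytest.mark.parametrize('func', [abs, lambda v: -v], ids=['abs', 'neg'])
    @given(la=st.lists(st.floats(allow_nan=False), min_size=1, max_size=20))
    def test_unary_elementwise(func, la):
        # hypothesis supplies `la`, pytest supplies `func`; the body just
        # checks the operation element-wise.
        out = [func(x) for x in la]
        assert len(out) == len(la)
        assert all(out[i] == func(la[i]) for i in range(len(la)))
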
diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py
--- a/rpython/memory/gc/test/test_direct.py
+++ b/rpython/memory/gc/test/test_direct.py
@@ -774,7 +774,7 @@
     def test_collect_0(self, debuglog):
         self.gc.collect(1) # start a major
         debuglog.reset()
-        self.gc.collect(0) # do ONLY a minor
+        self.gc.collect(-1) # do ONLY a minor
         assert debuglog.summary() == {'gc-minor': 1}
 
     def test_enable_disable(self, debuglog):
diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py
--- a/rpython/rlib/objectmodel.py
+++ b/rpython/rlib/objectmodel.py
@@ -991,7 +991,9 @@
         items = d.items()
         d.clear()
         d[key] = value
-        d.update(items)
+        # r_dict.update does not support list of tuples, do it manually
+        for key, value in items:
+            d[key] = value
 
 @specialize.call_location()
 def move_to_end(d, key, last=True):
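
move_to_end with last=False rebuilds the mapping by saving its items, clearing it, inserting the moved key first, and reinserting the rest; for r_dict the reinsertion is now an explicit loop because its update() does not accept a list of tuples. A pure-Python sketch of that clear-and-reinsert idea, assuming an insertion-ordered dict (illustrative, not the RPython helper itself):

    def move_to_front(d, key):
        value = d[key]
        rest = [(k, v) for k, v in d.items() if k != key]
        d.clear()
        d[key] = value
        for k, v in rest:      # manual reinsertion, as in the patch
            d[k] = v

    d = {'key1': 1, 'key2': 2, 'key3': 3}
    move_to_front(d, 'key3')
    assert list(d) == ['key3', 'key1', 'key2']
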
diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py
--- a/rpython/rlib/test/test_objectmodel.py
+++ b/rpython/rlib/test/test_objectmodel.py
@@ -708,6 +708,15 @@
     move_to_end(d, 'key1', last=False)
     assert d.items() == [('key1', 'val1'), ('key2', 'val2'), ('key3', 'val3')]
 
+def test_r_dict_move_to_end():
+    d = r_dict(strange_key_eq, strange_key_hash)
+    d['1key'] = 'val1'
+    d['2key'] = 'val2'
+    d['3key'] = 'val3'
+    # does not crash, we can't check that it actually moves to end on CPython
+    move_to_end(d, '1key')
+    move_to_end(d, '1key', last=False)
+
 def test_import_from_mixin():
     class M:    # old-style
         def f(self):
diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py
--- a/rpython/rtyper/test/test_rdict.py
+++ b/rpython/rtyper/test/test_rdict.py
@@ -1,6 +1,7 @@
 import sys
 from contextlib import contextmanager
 import signal
+from collections import OrderedDict
 
 from rpython.translator.translator import TranslationContext
 from rpython.annotator.model import (
@@ -1196,7 +1197,7 @@
                         DictValue(None, s_value))
         dictrepr.setup()
         self.l_dict = self.newdict(dictrepr)
-        self.reference = self.new_reference()
+        self.reference = OrderedDict()
         self.ll_key = r_key.convert_const
         self.ll_value = r_value.convert_const
         self.removed_keys = []
@@ -1323,7 +1324,6 @@
 
 class DictSpace(MappingSpace):
     MappingRepr = rdict.DictRepr
-    new_reference = dict
     ll_getitem = staticmethod(rdict.ll_dict_getitem)
     ll_setitem = staticmethod(rdict.ll_dict_setitem)
     ll_delitem = staticmethod(rdict.ll_dict_delitem)
diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py
--- a/rpython/rtyper/test/test_rordereddict.py
+++ b/rpython/rtyper/test/test_rordereddict.py
@@ -422,7 +422,6 @@
 
 class ODictSpace(MappingSpace):
     MappingRepr = rodct.OrderedDictRepr
-    new_reference = OrderedDict
     moved_around = False
     ll_getitem = staticmethod(rodct.ll_dict_getitem)
     ll_setitem = staticmethod(rodct.ll_dict_setitem)
diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h
--- a/rpython/translator/c/src/mem.h
+++ b/rpython/translator/c/src/mem.h
@@ -112,6 +112,8 @@
 #define OP_GC__DISABLE_FINALIZERS(r)  boehm_gc_finalizer_lock++
 #define OP_GC__ENABLE_FINALIZERS(r)  (boehm_gc_finalizer_lock--,	\
 				      boehm_gc_finalizer_notifier())
+#define OP_GC__DISABLE(r)             /* nothing */
+#define OP_GC__ENABLE(r)              /* nothing */
 
 #define OP_BOEHM_FQ_REGISTER(tagindex, obj, r)                          \
     boehm_fq_register(boehm_fq_queues + tagindex, obj)
@@ -127,6 +129,8 @@
 #define OP_BOEHM_DISAPPEARING_LINK(link, obj, r)  /* nothing */
 #define OP_GC__DISABLE_FINALIZERS(r)  /* nothing */
 #define OP_GC__ENABLE_FINALIZERS(r)  /* nothing */
+#define OP_GC__DISABLE(r)             /* nothing */
+#define OP_GC__ENABLE(r)              /* nothing */
 #define GC_REGISTER_FINALIZER(a, b, c, d, e)  /* nothing */
 #define GC_gcollect()  /* nothing */
 #define GC_set_max_heap_size(a)  /* nothing */
diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py
--- a/rpython/translator/platform/windows.py
+++ b/rpython/translator/platform/windows.py
@@ -9,14 +9,14 @@
 import rpython
 rpydir = str(py.path.local(rpython.__file__).join('..'))
 
-def _get_compiler_type(cc, x64_flag, ver0=None):
+def _get_compiler_type(cc, x64_flag):
     if not cc:
         cc = os.environ.get('CC','')
     if not cc:
-        return MsvcPlatform(x64=x64_flag, ver0=ver0)
+        return MsvcPlatform(x64=x64_flag)
     elif cc.startswith('mingw') or cc == 'gcc':
         return MingwPlatform(cc)
-    return MsvcPlatform(cc=cc, x64=x64_flag, ver0=ver0)
+    return MsvcPlatform(cc=cc, x64=x64_flag)
 
 def _get_vcver0():
     # try to get the compiler which served to compile python
@@ -28,17 +28,13 @@
         return vsver
     return None

