[pypy-commit] pypy armhf-singlefloat: merge default

bivab noreply at buildbot.pypy.org
Tue Nov 19 09:31:44 CET 2013


Author: David Schneider <david.schneider at picle.org>
Branch: armhf-singlefloat
Changeset: r68234:9f4093666c5d
Date: 2013-11-19 09:28 +0100
http://bitbucket.org/pypy/pypy/changeset/9f4093666c5d/

Log:	merge default

diff too long, truncating to 2000 out of 4681 lines

diff --git a/README.rst b/README.rst
--- a/README.rst
+++ b/README.rst
@@ -33,7 +33,7 @@
     $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py
 
 This ends up with ``pypy-c`` binary in the main pypy directory. We suggest
-to use virtualenv with the resulting pypy-c as the interpreter, you can
+to use virtualenv with the resulting pypy-c as the interpreter; you can
 find more details about various installation schemes here:
 
 http://doc.pypy.org/en/latest/getting-started.html#installing-pypy
diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py
--- a/lib-python/2.7/test/test_mailbox.py
+++ b/lib-python/2.7/test/test_mailbox.py
@@ -38,14 +38,9 @@
     def _delete_recursively(self, target):
         # Delete a file or delete a directory recursively
         if os.path.isdir(target):
-            for path, dirs, files in os.walk(target, topdown=False):
-                for name in files:
-                    os.remove(os.path.join(path, name))
-                for name in dirs:
-                    os.rmdir(os.path.join(path, name))
-            os.rmdir(target)
+            test_support.rmtree(target)
         elif os.path.exists(target):
-            os.remove(target)
+            test_support.unlink(target)
 
 
 class TestMailbox(TestBase):
@@ -137,6 +132,7 @@
         msg = self._box.get(key1)
         self.assertEqual(msg['from'], 'foo')
         self.assertEqual(msg.fp.read(), '1')
+        msg.fp.close()
 
     def test_getitem(self):
         # Retrieve message using __getitem__()
@@ -169,10 +165,14 @@
         # Get file representations of messages
         key0 = self._box.add(self._template % 0)
         key1 = self._box.add(_sample_message)
-        self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'),
+        msg0 = self._box.get_file(key0)
+        self.assertEqual(msg0.read().replace(os.linesep, '\n'),
                          self._template % 0)
-        self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'),
+        msg1 = self._box.get_file(key1)
+        self.assertEqual(msg1.read().replace(os.linesep, '\n'),
                          _sample_message)
+        msg0.close()
+        msg1.close()
 
     def test_get_file_can_be_closed_twice(self):
         # Issue 11700
@@ -407,6 +407,7 @@
         self._box.add(contents[0])
         self._box.add(contents[1])
         self._box.add(contents[2])
+        oldbox = self._box
         method()
         if should_call_close:
             self._box.close()
@@ -415,6 +416,7 @@
         self.assertEqual(len(keys), 3)
         for key in keys:
             self.assertIn(self._box.get_string(key), contents)
+        oldbox.close()
 
     def test_dump_message(self):
         # Write message representations to disk
@@ -1835,6 +1837,10 @@
     def setUp(self):
         # create a new maildir mailbox to work with:
         self._dir = test_support.TESTFN
+        if os.path.isdir(self._dir):
+            test_support.rmtree(self._dir)
+        if os.path.isfile(self._dir):
+            test_support.unlink(self._dir)
         os.mkdir(self._dir)
         os.mkdir(os.path.join(self._dir, "cur"))
         os.mkdir(os.path.join(self._dir, "tmp"))
@@ -1844,10 +1850,10 @@
 
     def tearDown(self):
         map(os.unlink, self._msgfiles)
-        os.rmdir(os.path.join(self._dir, "cur"))
-        os.rmdir(os.path.join(self._dir, "tmp"))
-        os.rmdir(os.path.join(self._dir, "new"))
-        os.rmdir(self._dir)
+        test_support.rmdir(os.path.join(self._dir, "cur"))
+        test_support.rmdir(os.path.join(self._dir, "tmp"))
+        test_support.rmdir(os.path.join(self._dir, "new"))
+        test_support.rmdir(self._dir)
 
     def createMessage(self, dir, mbox=False):
         t = int(time.time() % 1000000)
@@ -1883,7 +1889,9 @@
         self.createMessage("cur")
         self.mbox = mailbox.Maildir(test_support.TESTFN)
         #self.assertTrue(len(self.mbox.boxes) == 1)
-        self.assertIsNot(self.mbox.next(), None)
+        msg = self.mbox.next()
+        self.assertIsNot(msg, None)
+        msg.fp.close()
         self.assertIs(self.mbox.next(), None)
         self.assertIs(self.mbox.next(), None)
 
@@ -1891,7 +1899,9 @@
         self.createMessage("new")
         self.mbox = mailbox.Maildir(test_support.TESTFN)
         #self.assertTrue(len(self.mbox.boxes) == 1)
-        self.assertIsNot(self.mbox.next(), None)
+        msg = self.mbox.next()
+        self.assertIsNot(msg, None)
+        msg.fp.close()
         self.assertIs(self.mbox.next(), None)
         self.assertIs(self.mbox.next(), None)
 
@@ -1900,8 +1910,12 @@
         self.createMessage("new")
         self.mbox = mailbox.Maildir(test_support.TESTFN)
         #self.assertTrue(len(self.mbox.boxes) == 2)
-        self.assertIsNot(self.mbox.next(), None)
-        self.assertIsNot(self.mbox.next(), None)
+        msg = self.mbox.next()
+        self.assertIsNot(msg, None)
+        msg.fp.close()
+        msg = self.mbox.next()
+        self.assertIsNot(msg, None)
+        msg.fp.close()
         self.assertIs(self.mbox.next(), None)
         self.assertIs(self.mbox.next(), None)
 
@@ -1910,11 +1924,13 @@
         import email.parser
         fname = self.createMessage("cur", True)
         n = 0
-        for msg in mailbox.PortableUnixMailbox(open(fname),
+        fid = open(fname)
+        for msg in mailbox.PortableUnixMailbox(fid,
                                                email.parser.Parser().parse):
             n += 1
             self.assertEqual(msg["subject"], "Simple Test")
             self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE))
+        fid.close()
         self.assertEqual(n, 1)
 
 ## End: classes from the original module (for backward compatibility).
diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py
--- a/lib-python/2.7/test/test_mmap.py
+++ b/lib-python/2.7/test/test_mmap.py
@@ -11,7 +11,7 @@
 
     def setUp(self):
         if os.path.exists(TESTFN):
-            os.unlink(TESTFN)
+            unlink(TESTFN)
 
     def tearDown(self):
         try:
diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py
--- a/lib-python/2.7/test/test_old_mailbox.py
+++ b/lib-python/2.7/test/test_old_mailbox.py
@@ -73,7 +73,9 @@
         self.createMessage("cur")
         self.mbox = mailbox.Maildir(test_support.TESTFN)
         self.assertTrue(len(self.mbox) == 1)
-        self.assertTrue(self.mbox.next() is not None)
+        msg = self.mbox.next()
+        self.assertTrue(msg is not None)
+        msg.fp.close()
         self.assertTrue(self.mbox.next() is None)
         self.assertTrue(self.mbox.next() is None)
 
@@ -81,7 +83,9 @@
         self.createMessage("new")
         self.mbox = mailbox.Maildir(test_support.TESTFN)
         self.assertTrue(len(self.mbox) == 1)
-        self.assertTrue(self.mbox.next() is not None)
+        msg = self.mbox.next()
+        self.assertTrue(msg is not None)
+        msg.fp.close()
         self.assertTrue(self.mbox.next() is None)
         self.assertTrue(self.mbox.next() is None)
 
@@ -90,8 +94,12 @@
         self.createMessage("new")
         self.mbox = mailbox.Maildir(test_support.TESTFN)
         self.assertTrue(len(self.mbox) == 2)
-        self.assertTrue(self.mbox.next() is not None)
-        self.assertTrue(self.mbox.next() is not None)
+        msg = self.mbox.next()
+        self.assertTrue(msg is not None)
+        msg.fp.close()
+        msg = self.mbox.next()
+        self.assertTrue(msg is not None)
+        msg.fp.close()
         self.assertTrue(self.mbox.next() is None)
         self.assertTrue(self.mbox.next() is None)
 
diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py
--- a/lib-python/2.7/test/test_os.py
+++ b/lib-python/2.7/test/test_os.py
@@ -75,7 +75,7 @@
         self.assertFalse(os.path.exists(name),
                     "file already exists for temporary file")
         # make sure we can create the file
-        open(name, "w")
+        open(name, "w").close()
         self.files.append(name)
 
     def test_tempnam(self):
diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py
--- a/lib-python/2.7/test/test_support.py
+++ b/lib-python/2.7/test/test_support.py
@@ -179,15 +179,79 @@
     except KeyError:
         pass
 
+if sys.platform.startswith("win"):
+    def _waitfor(func, pathname, waitall=False):
+        # Perform the operation
+        func(pathname)
+        # Now set up the wait loop
+        if waitall:
+            dirname = pathname
+        else:
+            dirname, name = os.path.split(pathname)
+            dirname = dirname or '.'
+        # Check for `pathname` to be removed from the filesystem.
+        # The exponential backoff of the timeout amounts to a total
+        # of ~1 second after which the deletion is probably an error
+        # anyway.
+        # Testing on an i7 at 4.3GHz shows that usually only 1 iteration is
+        # required when contention occurs.
+        timeout = 0.001
+        while timeout < 1.0:
+            # Note we are only testing for the existence of the file(s) in
+            # the contents of the directory regardless of any security or
+            # access rights.  If we have made it this far, we have sufficient
+            # permissions to do that much using Python's equivalent of the
+            # Windows API FindFirstFile.
+            # Other Windows APIs can fail or give incorrect results when
+            # dealing with files that are pending deletion.
+            L = os.listdir(dirname)
+            if not (L if waitall else name in L):
+                return
+            # Increase the timeout and try again
+            time.sleep(timeout)
+            timeout *= 2
+        warnings.warn('tests may fail, delete still pending for ' + pathname,
+                      RuntimeWarning, stacklevel=4)
+
+    def _unlink(filename):
+        _waitfor(os.unlink, filename)
+
+    def _rmdir(dirname):
+        _waitfor(os.rmdir, dirname)
+
+    def _rmtree(path):
+        def _rmtree_inner(path):
+            for name in os.listdir(path):
+                fullname = os.path.join(path, name)
+                if os.path.isdir(fullname):
+                    _waitfor(_rmtree_inner, fullname, waitall=True)
+                    os.rmdir(fullname)
+                else:
+                    os.unlink(fullname)
+        _waitfor(_rmtree_inner, path, waitall=True)
+        _waitfor(os.rmdir, path)
+else:
+    _unlink = os.unlink
+    _rmdir = os.rmdir
+    _rmtree = shutil.rmtree
+
 def unlink(filename):
     try:
-        os.unlink(filename)
+        _unlink(filename)
     except OSError:
         pass
 
+def rmdir(dirname):
+    try:
+        _rmdir(dirname)
+    except OSError as error:
+        # The directory need not exist.
+        if error.errno != errno.ENOENT:
+            raise
+
 def rmtree(path):
     try:
-        shutil.rmtree(path)
+        _rmtree(path)
     except OSError, e:
         # Unix returns ENOENT, Windows returns ESRCH.
         if e.errno not in (errno.ENOENT, errno.ESRCH):
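
The Windows-only helpers added above retry a deletion with exponential backoff, because a file can
still show up in its parent directory while the delete is pending. A minimal standalone sketch of
that retry pattern (an illustration of the idea, not the exact test_support code):

    import os
    import time
    import warnings

    def waitfor_delete(delete, pathname, max_timeout=1.0):
        # Perform the deletion, then poll the parent directory until the
        # entry disappears, doubling the sleep interval on each iteration.
        delete(pathname)
        dirname, name = os.path.split(pathname)
        dirname = dirname or '.'
        timeout = 0.001
        while timeout < max_timeout:
            if name not in os.listdir(dirname):
                return
            time.sleep(timeout)
            timeout *= 2
        warnings.warn('delete still pending for ' + pathname, RuntimeWarning)

    # e.g. waitfor_delete(os.unlink, 'some_temp_file')
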
diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py
--- a/lib-python/2.7/test/test_tarfile.py
+++ b/lib-python/2.7/test/test_tarfile.py
@@ -300,26 +300,21 @@
 
     def test_extract_hardlink(self):
         # Test hardlink extraction (e.g. bug #857297).
-        tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1")
+        with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar:
+            tar.extract("ustar/regtype", TEMPDIR)
+            self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype"))
 
-        tar.extract("ustar/regtype", TEMPDIR)
-        try:
             tar.extract("ustar/lnktype", TEMPDIR)
-        except EnvironmentError, e:
-            if e.errno == errno.ENOENT:
-                self.fail("hardlink not extracted properly")
+            self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype"))
+            with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f:
+                data = f.read()
+            self.assertEqual(md5sum(data), md5_regtype)
 
-        data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read()
-        self.assertEqual(md5sum(data), md5_regtype)
-
-        try:
             tar.extract("ustar/symtype", TEMPDIR)
-        except EnvironmentError, e:
-            if e.errno == errno.ENOENT:
-                self.fail("symlink not extracted properly")
-
-        data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read()
-        self.assertEqual(md5sum(data), md5_regtype)
+            self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype"))
+            with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f:
+                data = f.read()
+            self.assertEqual(md5sum(data), md5_regtype)
 
     def test_extractall(self):
         # Test if extractall() correctly restores directory permissions
@@ -340,7 +335,7 @@
         # constructor in case of an error. For the test we rely on
         # the fact that opening an empty file raises a ReadError.
         empty = os.path.join(TEMPDIR, "empty")
-        open(empty, "wb").write("")
+        open(empty, "wb").close()
 
         try:
             tar = object.__new__(tarfile.TarFile)
@@ -351,7 +346,7 @@
             else:
                 self.fail("ReadError not raised")
         finally:
-            os.remove(empty)
+            test_support.unlink(empty)
 
 
 class StreamReadTest(CommonReadTest):
@@ -1327,7 +1322,7 @@
     def setUp(self):
         self.tarname = tmpname
         if os.path.exists(self.tarname):
-            os.remove(self.tarname)
+            test_support.unlink(self.tarname)
 
     def _add_testfile(self, fileobj=None):
         tar = tarfile.open(self.tarname, "a", fileobj=fileobj)
diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py
--- a/lib-python/2.7/traceback.py
+++ b/lib-python/2.7/traceback.py
@@ -107,7 +107,7 @@
     return list
 
 
-def print_exception(etype, value, tb, limit=None, file=None):
+def print_exception(etype, value, tb, limit=None, file=None, _encoding=None):
     """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
 
     This differs from print_tb() in the following ways: (1) if
@@ -123,7 +123,7 @@
     if tb:
         _print(file, 'Traceback (most recent call last):')
         print_tb(tb, limit, file)
-    lines = format_exception_only(etype, value)
+    lines = format_exception_only(etype, value, _encoding)
     for line in lines:
         _print(file, line, '')
 
@@ -144,7 +144,7 @@
     list = list + format_exception_only(etype, value)
     return list
 
-def format_exception_only(etype, value):
+def format_exception_only(etype, value, _encoding=None):
     """Format the exception part of a traceback.
 
     The arguments are the exception type and value such as given by
@@ -170,12 +170,12 @@
     if (isinstance(etype, BaseException) or
         isinstance(etype, types.InstanceType) or
         etype is None or type(etype) is str):
-        return [_format_final_exc_line(etype, value)]
+        return [_format_final_exc_line(etype, value, _encoding)]
 
     stype = etype.__name__
 
     if not issubclass(etype, SyntaxError):
-        return [_format_final_exc_line(stype, value)]
+        return [_format_final_exc_line(stype, value, _encoding)]
 
     # It was a syntax error; show exactly where the problem was found.
     lines = []
@@ -196,26 +196,26 @@
                 lines.append('   %s^\n' % ''.join(caretspace))
         value = msg
 
-    lines.append(_format_final_exc_line(stype, value))
+    lines.append(_format_final_exc_line(stype, value, _encoding))
     return lines
 
-def _format_final_exc_line(etype, value):
+def _format_final_exc_line(etype, value, _encoding=None):
     """Return a list of a single line -- normal case for format_exception_only"""
-    valuestr = _some_str(value)
+    valuestr = _some_str(value, _encoding)
     if value is None or not valuestr:
         line = "%s\n" % etype
     else:
         line = "%s: %s\n" % (etype, valuestr)
     return line
 
-def _some_str(value):
+def _some_str(value, _encoding=None):
     try:
         return str(value)
     except Exception:
         pass
     try:
         value = unicode(value)
-        return value.encode("ascii", "backslashreplace")
+        return value.encode(_encoding or "ascii", "backslashreplace")
     except Exception:
         pass
     return '<unprintable %s object>' % type(value).__name__
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -268,10 +268,18 @@
 if _has_load_extension():
     _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);")
 
-_lib = _ffi.verify("""
-#include <sqlite3.h>
-""", libraries=['sqlite3']
-)
+if sys.platform.startswith('freebsd'):
+    _lib = _ffi.verify("""
+    #include <sqlite3.h>
+    """, libraries=['sqlite3'],
+         include_dirs=['/usr/local/include'],
+         library_dirs=['/usr/local/lib']
+    )
+else:
+    _lib = _ffi.verify("""
+    #include <sqlite3.h>
+    """, libraries=['sqlite3']
+    )
 
 exported_sqlite_symbols = [
     'SQLITE_ALTER_TABLE',
diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py
--- a/lib_pypy/_tkinter/tklib.py
+++ b/lib_pypy/_tkinter/tklib.py
@@ -112,6 +112,14 @@
     incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include']
     linklibs = ['tk85', 'tcl85']
     libdirs = ['/usr/local/lib', '/usr/X11R6/lib']
+elif sys.platform.startswith("freebsd"):
+    incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include']
+    linklibs = ['tk86', 'tcl86']
+    libdirs = ['/usr/local/lib']
+elif sys.platform == 'win32':
+    incdirs = []
+    linklibs = ['tcl85', 'tk85']
+    libdirs = []
 else:
     incdirs=['/usr/include/tcl']
     linklibs=['tcl', 'tk']
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -347,6 +347,9 @@
     errno = property(_get_errno, _set_errno, None,
                      "the value of 'errno' from/to the C calls")
 
+    def getwinerror(self, code=-1):
+        return self._backend.getwinerror(code)
+
     def _pointer_to(self, ctype):
         from . import model
         with self._lock:
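
The getwinerror() method added above exposes the saved Windows LastError in the same way the errno
property exposes the saved errno. A hedged usage sketch (Windows only, and assuming a cffi backend
that implements getwinerror, as the hunk above does):

    import sys
    import cffi

    if sys.platform == 'win32':
        ffi = cffi.FFI()
        # Fetch the LastError value saved by the last failing call made
        # through this ffi instance, together with its formatted message.
        code, message = ffi.getwinerror()
        # Or format an explicit error code instead of the saved one.
        code, message = ffi.getwinerror(1155)
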
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -45,9 +45,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '2.1'
+version = '2.2'
 # The full version, including alpha/beta/rc tags.
-release = '2.1.0'
+release = '2.2.0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -48,6 +48,6 @@
 * send announcements to pypy-dev, python-list,
   python-announce, python-dev ...
 
-* add a tag on jitviewer that corresponds to pypy release
-* add a tag on codespeed that corresponds to pypy release
+* add a tag on the pypy/jitviewer repo that corresponds to the pypy release
+* add a tag on the codespeed web site that corresponds to the pypy release
 
diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
--- a/pypy/doc/index.rst
+++ b/pypy/doc/index.rst
@@ -40,7 +40,7 @@
 
 * `FAQ`_: some frequently asked questions.
 
-* `Release 2.1.0`_: the latest official release
+* `Release 2.2.0`_: the latest official release
 
 * `PyPy Blog`_: news and status info about PyPy 
 
@@ -110,7 +110,7 @@
 .. _`Getting Started`: getting-started.html
 .. _`Papers`: extradoc.html
 .. _`Videos`: video-index.html
-.. _`Release 2.1.0`: http://pypy.org/download.html
+.. _`Release 2.2.0`: http://pypy.org/download.html
 .. _`speed.pypy.org`: http://speed.pypy.org
 .. _`RPython toolchain`: translation.html
 .. _`potential project ideas`: project-ideas.html
diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-2.2.0.rst
@@ -0,0 +1,89 @@
+=======================================
+PyPy 2.2 - Incrementalism
+=======================================
+
+We're pleased to announce PyPy 2.2, which targets version 2.7.3 of the Python
+language. This release main highlight is the introduction of the incremental
+garbage collector, sponsored by the `Raspberry Pi Foundation`_.
+
+This release also contains several bugfixes and performance improvements. 
+
+You can download the PyPy 2.2 release here:
+
+    http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project. We showed quite a bit of progress on all three projects (see below)
+and we're slowly running out of funds.
+Please consider donating more so we can finish those projects!  The three
+projects are:
+
+* Py3k (supporting Python 3.x): the PyPy3 2.2 release is imminent.
+
+* STM (software transactional memory): a preview will be released very soon,
+  as soon as we fix a few bugs
+
+* NumPy: the work done is included in the PyPy 2.2 release. More details below.
+
+.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows
+32, or ARM (ARMv6 or ARMv7, with VFPv3).
+
+Work on native Windows 64-bit support is still stalling; we would welcome a volunteer
+to handle that.
+
+.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org
+
+Highlights
+==========
+
+* Our Garbage Collector is now "incremental".  It should avoid almost
+  all pauses due to a major collection taking place.  Previously, it
+  would pause the program (rarely) to walk all live objects, which
+  could take arbitrarily long if your process is using a whole lot of
+  RAM.  Now the same work is done in steps.  This should make PyPy
+  more responsive, e.g. in games.  There are still other pauses, from
+  the GC and the JIT, but they should be on the order of 5
+  milliseconds each.
+
+* The JIT counters for hot code were never reset, which meant that a
+  process running for long enough would eventually JIT-compile more
+  and more rarely executed code.  Not only is it useless to compile
+  such code, but since more compiled code means more memory used, it
+  gives the impression of a memory leak.  This has been tentatively
+  fixed by decreasing the counters from time to time.
+
+* NumPy has been split: now PyPy only contains the core module, called
+  ``_numpypy``.  The ``numpy`` module itself has been moved to
+  ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared.
+  You need to install NumPy separately with a virtualenv:
+  ``pip install git+https://bitbucket.org/pypy/numpy.git``;
+  or directly:
+  ``git clone https://bitbucket.org/pypy/numpy.git``;
+  ``cd numpy``; ``pypy setup.py install``.
+
+* non-inlined calls have less overhead
+
+* Things that use ``sys.settrace`` are now JITted (like coverage)
+
+* JSON decoding is now very fast (JSON encoding was already very fast)
+
+* various buffer copying methods experience speedups (like list-of-ints to
+  ``int[]`` buffer from cffi)
+
+* We finally wrote (hopefully) all the missing ``os.xxx()`` functions,
+  including ``os.startfile()`` on Windows and a handful of rare ones
+  on Posix.
+
+* numpy has a rudimentary C API that cooperates with ``cpyext``
+
+Cheers,
+Armin Rigo and Maciej Fijalkowski
diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst
--- a/pypy/doc/sandbox.rst
+++ b/pypy/doc/sandbox.rst
@@ -4,26 +4,36 @@
 Introduction
 ------------
 
-It is possible to compile a version of pypy-c that runs
-fully "virtualized", i.e. where an external process controls all
-input/output.  Such a pypy-c is a secure sandbox: it is safe to run
-any untrusted Python code with it.  The Python code cannot see or
-modify any local file except via interaction with the external
-process.  It is also impossible to do any other I/O or consume more
-than some amount of RAM or CPU time or real time.  This works with no
-OS support at all - just ANSI C code generated in a careful way.  It's
-the kind of thing you could embed in a browser plug-in, for example
-(it would be safe even if it wasn't run as a separate process,
-actually).
+PyPy offers sandboxing at a level similar to OS-level sandboxing (e.g.
+SECCOMP_ on Linux), but implemented in a fully portable way.  To use it,
+a (regular, trusted) program launches a subprocess that is a special
+sandboxed version of PyPy.  This subprocess can run arbitrary untrusted
+Python code, but all its input/output is serialized to a stdin/stdout
+pipe instead of being directly performed.  The outer process reads the
+pipe and decides which commands are allowed or not (sandboxing), or even
+reinterprets them differently (virtualization).  A potential attacker
+can have arbitrary code run in the subprocess, but cannot actually do
+any input/output not controlled by the outer process.  Additional
+barriers are put in place to limit the amount of RAM and CPU time used.
 
-For comparison, trying to plug CPython into a special virtualizing C
-library is not only OS-specific, but unsafe, because one of the known
-ways to segfault CPython could be used by an attacker to trick CPython
-into issuing malicious system calls directly.  The C code generated by
+Note that this is very different from sandboxing at the Python language
+level, i.e. placing restrictions on what kind of Python code the
+attacker is allowed to run (why? read about pysandbox_).
+
+.. _SECCOMP: http://code.google.com/p/seccompsandbox/wiki/overview
+.. _pysandbox: https://mail.python.org/pipermail/python-dev/2013-November/130132.html
+
+Another point of comparison: if we were instead to try to plug CPython
+into a special virtualizing C library, we would get a result
+that is not only OS-specific, but unsafe, because CPython can be
+segfaulted (in many ways, all of them really, really obscure).
+Given enough effort, an attacker can turn almost any
+segfault into a vulnerability.  The C code generated by
 PyPy is not segfaultable, as long as our code generators are correct -
-that's a lower number of lines of code to trust.  For the paranoid, in
-this case we also generate systematic run-time checks against buffer
-overflows.
+that's a lower number of lines of code to trust.  For the paranoid,
+PyPy translated with sandboxing also contains systematic run-time
+checks (against buffer overflows for example)
+that are normally only present in debugging versions.
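
The outer-process pattern described above can be sketched generically: the trusted side starts the
sandboxed interpreter with its stdin/stdout captured and answers every request it reads from the
pipe. The binary path and the line-based request format below are placeholders, not the real
pypy-c-sandbox protocol (PyPy ships its own controller for that):

    import subprocess

    SANDBOX_BINARY = './pypy-c-sandbox'   # hypothetical path to a sandboxed build

    def run_sandboxed(args):
        # The untrusted interpreter can only talk to us through these pipes;
        # every I/O operation it attempts arrives as a serialized request.
        proc = subprocess.Popen([SANDBOX_BINARY] + list(args),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        for request in iter(proc.stdout.readline, ''):
            # Inspect the request and decide: allow it, deny it, or answer
            # with virtualized data.  This sketch simply denies everything.
            proc.stdin.write('DENIED\n')
            proc.stdin.flush()
        return proc.wait()
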
 
 .. warning::
   
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.2.rst
copy from pypy/doc/whatsnew-head.rst
copy to pypy/doc/whatsnew-2.2.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-2.2.rst
@@ -1,5 +1,5 @@
 ======================
-What's new in PyPy 2.1
+What's new in PyPy 2.2
 ======================
 
 .. this is a revision shortly after release-2.1-beta
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -1,140 +1,15 @@
-======================
-What's new in PyPy 2.1
-======================
+=======================
+What's new in PyPy 2.2+
+=======================
 
-.. this is a revision shortly after release-2.1-beta
-.. startrev: 4eb52818e7c0
+.. this is a revision shortly after release-2.2.x
+.. startrev: 4cd1bc8b3111
 
-.. branch: sanitise_bytecode_dispatch
-Make PyPy's bytecode dispatcher easy to read, and less reliant on RPython
-magic. There is no functional change, though the removal of dead code leads
-to many fewer tests to execute.
+.. branch: release-2.2.x
 
-.. branch: fastjson
-Fast json decoder written in RPython, about 3-4x faster than the pure Python
-decoder which comes with the stdlib
+.. branch: numpy-newbyteorder
+Clean up numpy types, add newbyteorder functionality
 
-.. branch: improve-str2charp
-Improve the performance of I/O writing up to 15% by using memcpy instead of
-copying char-by-char in str2charp and get_nonmovingbuffer
+.. branch: windows-packaging
+Package tk/tcl runtime with win32
 
-.. branch: flowoperators
-Simplify rpython/flowspace/ code by using more metaprogramming.  Create
-SpaceOperator class to gather static information about flow graph operations.
-
-.. branch: package-tk
-Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch
-to optionally skip it.
-
-.. branch: distutils-cppldflags
-Copy CPython's implementation of customize_compiler, dont call split on
-environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices.
-
-.. branch: precise-instantiate
-When an RPython class is instantiated via an indirect call (that is, which
-class is being instantiated isn't known precisely) allow the optimizer to have
-more precise information about which functions can be called. Needed for Topaz.
-
-.. branch: ssl_moving_write_buffer
-
-.. branch: pythoninspect-fix
-Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process
-to start interactive prompt when the script execution finishes. This adds
-new __pypy__.os.real_getenv call that bypasses Python cache and looksup env
-in the underlying OS. Translatorshell now works on PyPy.
-
-.. branch: add-statvfs
-Added os.statvfs and os.fstatvfs
-
-.. branch: statvfs_tests
-Added some addition tests for statvfs.
-
-.. branch: ndarray-subtype
-Allow subclassing ndarray, i.e. matrix
-
-.. branch: ndarray-sort
-Implement ndarray in-place sorting (for numeric types, no non-native byte order)
-
-.. branch: pypy-pyarray
-Implement much of numpy's c api in cpyext, allows (slow) access to ndarray
-from c
-
-.. branch: kill-ootype
-
-.. branch: fast-slowpath
-Added an abstraction for functions with a fast and slow path in the JIT. This
-speeds up list.append() and list.pop().
-
-.. branch: curses_fixes
-
-.. branch: foldable-getarrayitem-indexerror
-Constant-fold reading out of constant tuples in PyPy.
-
-.. branch: mro-reorder-numpypy-str
-No longer delegate numpy string_ methods to space.StringObject, in numpy
-this works by kind of by accident. Support for merging the refactor-str-types
-branch
-
-.. branch: kill-typesystem
-Remove the "type system" abstraction, now that there is only ever one kind of
-type system used.
-
-.. branch: kill-gen-store-back-in
-Kills gen_store_back_in_virtualizable - should improve non-inlined calls by
-a bit
-
-.. branch: dotviewer-linewidth
-.. branch: reflex-support
-.. branch: numpypy-inplace-op
-.. branch: rewritten-loop-logging
-.. branch: no-release-gil
-.. branch: safe-win-mmap
-.. branch: boolean-indexing-cleanup
-.. branch: cpyext-best_base
-.. branch: cpyext-int
-.. branch: fileops2
-
-.. branch: nobold-backtrace
-Work on improving UnionError messages and stack trace displays.
-
-.. branch: improve-errors-again
-More improvements and refactorings of error messages.
-
-.. branch: improve-errors-again2
-Unbreak tests in rlib.
-
-.. branch: less-stringly-ops
-Use subclasses of SpaceOperation instead of SpaceOperator objects.
-Random cleanups in flowspace.
-
-.. branch: file-support-in-rpython
-make open() and friends rpython
-
-.. branch: incremental-gc
-Added the new incminimark GC which performs GC in incremental steps
-
-.. branch: fast_cffi_list_init
-fastpath for cffi.new("long[]")
-
-.. branch: remove-eval-frame
-remove a pointless abstraction
-
-.. branch: jit-settrace
-Allow the jit to continue running when sys.settrace() is active, necessary to
-make coverage.py fast
-
-.. branch: remove-numpypy
-Remove lib_pypy/numpypy in favor of external numpy fork
-
-.. branch: jit-counter
-Tweak the jit counters: decay them at minor collection (actually
-only every 32 minor collection is enough). Should avoid the "memory
-leaks" observed in long-running processes, actually created by the
-jit compiling more and more rarely executed paths.
-
-.. branch: fix-trace-jit
-Fixed the usage of sys.settrace() with the JIT. Also made it so using
-sys.settrace() doesn't cause the GIL to be released on every single iteration.
-
-.. branch: rordereddict
-Implement OrderedDict in RPython
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -73,11 +73,11 @@
 https://bitbucket.org/pypy/pypy/downloads/local.zip
 Then expand it into the base directory (base_dir) and modify your environment to reflect this::
 
-    set PATH=<base_dir>\bin;%PATH%
-    set INCLUDE=<base_dir>\include;%INCLUDE%
-    set LIB=<base_dir>\lib;%LIB%
+    set PATH=<base_dir>\bin;<base_dir>\tcltk\bin;%PATH%
+    set INCLUDE=<base_dir>\include;<base_dir>\tcltk\include;%INCLUDE%
+    set LIB=<base_dir>\lib;<base_dir>\tcltk\lib;%LIB%
 
-Now you should be good to go. Read on for more information.
+Now you should be good to go. Read on for more information. 
 
 The Boehm garbage collector
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -109,11 +109,10 @@
 The bz2 compression library
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Download http://bzip.org/1.0.5/bzip2-1.0.5.tar.gz and extract it in
-the base directory.  Then compile::
-
-    cd bzip2-1.0.5
+    svn export http://svn.python.org/projects/external/bzip2-1.0.6
+    cd bzip2-1.0.6
     nmake -f makefile.msc
+    copy bzip.dll <somewhere in the PATH>\bzip.dll
     
 The sqlite3 database library
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -122,8 +121,6 @@
 wrapper is compiled when the module is imported for the first time.
 The sqlite3.dll should be version 3.6.21 for CPython2.7 compatibility.
 
-
-
 The expat XML parser
 ~~~~~~~~~~~~~~~~~~~~
 
@@ -146,13 +143,33 @@
 use the one distributed by ActiveState, or the one from cygwin.  In
 both cases the perl interpreter must be found on the PATH.
 
-Get http://www.openssl.org/source/openssl-0.9.8k.tar.gz and extract it
-in the base directory. Then compile::
-
+    svn export http://svn.python.org/projects/external/openssl-0.9.8y
+    cd openssl-0.9.8y
     perl Configure VC-WIN32
     ms\do_ms.bat
     nmake -f ms\nt.mak install
 
+TkInter module support
+~~~~~~~~~~~~~~~~~~~~~~
+
+Note that much of this is taken from the cpython build process.
+Tkinter is imported via cffi, so the module is optional. To recreate the tcltk
+directory found for the release script, create the dlls, libs, headers and
+runtime by running::
+
+	svn export http://svn.python.org/projects/external/tcl-8.5.2.1 tcl85 
+	svn export http://svn.python.org/projects/external/tk-8.5.2.0 tk85
+	cd tcl85\win 
+	nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 DEBUG=0 INSTALLDIR=..\..\tcltk clean all 
+	nmake -f makefile.vc DEBUG=0 INSTALLDIR=..\..\tcltk install
+	cd ..\..\tk85\win 
+	nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 clean all 
+	nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 install
+
+Now you should have a tcltk\bin, tcltk\lib, and tcltk\include directory ready
+for use. The release packaging script will pick up the tcltk runtime in the lib
+directory and put it in the archive.
+
 Using the mingw compiler
 ------------------------
 
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -942,7 +942,8 @@
 
         self.w_tmp_dir = self.space.wrap(tmp_dir)
 
-        foo_py = prefix.join('foo.py').write("pass")
+        foo_py = prefix.join('foo.py')
+        foo_py.write("pass")
         self.w_foo_py = self.space.wrap(str(foo_py))
 
     def test_setup_bootstrap_path(self):
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -1,3 +1,4 @@
+import sys
 from pypy.interpreter.mixedmodule import MixedModule
 from rpython.rlib import rdynload
 
@@ -43,6 +44,8 @@
         'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")',
         'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name
         }
+    if sys.platform == 'win32':
+        interpleveldefs['getwinerror'] = 'cerrno.getwinerror'
 
 for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL",
               "RTLD_NODELETE", "RTLD_NOLOAD", "RTLD_DEEPBIND"]:
diff --git a/pypy/module/_cffi_backend/cerrno.py b/pypy/module/_cffi_backend/cerrno.py
--- a/pypy/module/_cffi_backend/cerrno.py
+++ b/pypy/module/_cffi_backend/cerrno.py
@@ -39,3 +39,14 @@
 def set_errno(space, errno):
     ec = get_errno_container(space)
     ec._cffi_saved_errno = errno
+
+# ____________________________________________________________
+
+ at unwrap_spec(code=int)
+def getwinerror(space, code=-1):
+    from rpython.rlib.rwin32 import FormatError
+    if code == -1:
+        ec = get_errno_container(space)
+        code = ec._cffi_saved_LastError
+    message = FormatError(code)
+    return space.newtuple([space.wrap(code), space.wrap(message)])
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1086,7 +1086,9 @@
     assert strlenaddr == cast(BVoidP, strlen)
 
 def test_read_variable():
-    if sys.platform == 'win32' or sys.platform == 'darwin':
+    ## FIXME: this test assumes glibc-specific behavior; it's not compliant with the C standard
+    ## https://bugs.pypy.org/issue1643
+    if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
         py.test.skip("untested")
     BVoidP = new_pointer_type(new_void_type())
     ll = find_and_load_library('c')
@@ -1094,7 +1096,9 @@
     assert stderr == cast(BVoidP, _testfunc(8))
 
 def test_read_variable_as_unknown_length_array():
-    if sys.platform == 'win32' or sys.platform == 'darwin':
+    ## FIXME: this test assumes glibc-specific behavior; it's not compliant with the C standard
+    ## https://bugs.pypy.org/issue1643
+    if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
         py.test.skip("untested")
     BCharP = new_pointer_type(new_primitive_type("char"))
     BArray = new_array_type(BCharP, None)
@@ -1104,7 +1108,9 @@
     # ^^ and not 'char[]', which is basically not allowed and would crash
 
 def test_write_variable():
-    if sys.platform == 'win32' or sys.platform == 'darwin':
+    ## FIXME: this test assumes glibc-specific behavior; it's not compliant with the C standard
+    ## https://bugs.pypy.org/issue1643
+    if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
         py.test.skip("untested")
     BVoidP = new_pointer_type(new_void_type())
     ll = find_and_load_library('c')
@@ -2687,6 +2693,16 @@
     #
     res = GetLastError()
     assert res == 42
+    #
+    SetLastError(2)
+    code, message = getwinerror()
+    assert code == 2
+    assert message == "The system cannot find the file specified"
+    #
+    code, message = getwinerror(1155)
+    assert code == 1155
+    assert message == ("No application is associated with the "
+                       "specified file for this operation")
 
 def test_nonstandard_integer_types():
     for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t',
diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py
--- a/pypy/module/_pypyjson/interp_decoder.py
+++ b/pypy/module/_pypyjson/interp_decoder.py
@@ -2,7 +2,7 @@
 import math
 from rpython.rlib.rstring import StringBuilder
 from rpython.rlib.objectmodel import specialize
-from rpython.rlib import rfloat
+from rpython.rlib import rfloat, runicode
 from rpython.rtyper.lltypesystem import lltype, rffi
 from pypy.interpreter.error import OperationError, operationerrfmt
 from pypy.interpreter.gateway import unwrap_spec
@@ -373,7 +373,7 @@
             return # help the annotator to know that we'll never go beyond
                    # this point
         #
-        uchr = unichr(val)
+        uchr = runicode.code_to_unichr(val)     # may be a surrogate pair again
         utf8_ch = unicodehelper.encode_utf8(self.space, uchr)
         builder.append(utf8_ch)
         return i
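
The decoder change above switches to runicode.code_to_unichr because a \uXXXX escape pair can
encode a single code point above U+FFFF (as exercised by the surrogate-pair test further down). A
quick illustration of how such a pair combines, independent of the PyPy internals:

    def combine_surrogates(hi, lo):
        # A JSON escape pair like \ud834\udd20 encodes one code point above
        # U+FFFF: here 0x1D120, in the Musical Symbols block.
        return 0x10000 + ((hi - 0xD800) << 10) + (lo - 0xDC00)

    assert combine_surrogates(0xD834, 0xDD20) == 0x1D120
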
diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py
--- a/pypy/module/_pypyjson/test/test__pypyjson.py
+++ b/pypy/module/_pypyjson/test/test__pypyjson.py
@@ -1,5 +1,4 @@
 # -*- encoding: utf-8 -*-
-import py, sys
 from pypy.module._pypyjson.interp_decoder import JSONDecoder
 
 def test_skip_whitespace():
@@ -16,9 +15,6 @@
 class AppTest(object):
     spaceconfig = {"objspace.usemodules._pypyjson": True}
 
-    def setup_class(cls):
-        cls.w_run_on_16bit = cls.space.wrap(sys.maxunicode == 65535)
-
     def test_raise_on_unicode(self):
         import _pypyjson
         raises(TypeError, _pypyjson.loads, u"42")
@@ -183,8 +179,6 @@
         raises(ValueError, """_pypyjson.loads('["extra comma",]')""")
 
     def test_unicode_surrogate_pair(self):
-        if self.run_on_16bit:
-            skip("XXX fix me or mark definitely skipped")
         import _pypyjson
         expected = u'z\U0001d120x'
         res = _pypyjson.loads('"z\\ud834\\udd20x"')
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -10,9 +10,11 @@
 
 from pypy.interpreter.baseobjspace import W_Root
 from pypy.interpreter.buffer import RWBuffer
-from pypy.interpreter.error import OperationError
-from pypy.interpreter.gateway import interp2app, unwrap_spec, interpindirect2app
-from pypy.interpreter.typedef import GetSetProperty, make_weakref_descr, TypeDef
+from pypy.interpreter.error import OperationError, operationerrfmt
+from pypy.interpreter.gateway import (
+    interp2app, interpindirect2app, unwrap_spec)
+from pypy.interpreter.typedef import (
+    GetSetProperty, TypeDef, make_weakref_descr)
 from pypy.module._file.interp_file import W_File
 from pypy.objspace.std.floatobject import W_FloatObject
 
@@ -60,12 +62,12 @@
 def descr_typecode(space, self):
     return space.wrap(self.typecode)
 
-arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens = ['comp_func'], reds = 'auto')
+arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens=['comp_func'],
+                              reds='auto')
 EQ, NE, LT, LE, GT, GE = range(6)
 
 def compare_arrays(space, arr1, arr2, comp_op):
-    if (not isinstance(arr1, W_ArrayBase) or
-        not isinstance(arr2, W_ArrayBase)):
+    if not (isinstance(arr1, W_ArrayBase) and isinstance(arr2, W_ArrayBase)):
         return space.w_NotImplemented
     if comp_op == EQ and arr1.len != arr2.len:
         return space.w_False
@@ -236,9 +238,12 @@
             raise OperationError(self.space.w_ValueError, self.space.wrap(msg))
         oldlen = self.len
         new = len(s) / self.itemsize
+        if not new:
+            return
         self.setlen(oldlen + new)
         cbuf = self._charbuf_start()
-        copy_string_to_raw(llstr(s), rffi.ptradd(cbuf, oldlen * self.itemsize), 0, len(s))
+        copy_string_to_raw(llstr(s), rffi.ptradd(cbuf, oldlen * self.itemsize),
+                           0, len(s))
         self._charbuf_stop()
 
     @unwrap_spec(w_f=W_File, n=int)
@@ -268,8 +273,8 @@
     def descr_tofile(self, space, w_f):
         """ tofile(f)
 
-        Write all items (as machine values) to the file object f.  Also called as
-        write.
+        Write all items (as machine values) to the file object f.  Also
+        called as write.
         """
         w_s = self.descr_tostring(space)
         space.call_method(w_f, 'write', w_s)
@@ -351,8 +356,8 @@
     def descr_byteswap(self, space):
         """ byteswap()
 
-        Byteswap all items of the array.  If the items in the array are not 1, 2,
-        4, or 8 bytes in size, RuntimeError is raised.
+        Byteswap all items of the array.  If the items in the array are
+        not 1, 2, 4, or 8 bytes in size, RuntimeError is raised.
         """
         if self.itemsize not in [1, 2, 4, 8]:
             msg = "byteswap not supported for this array"
@@ -434,7 +439,8 @@
         return self.delitem(space, start, stop)
 
     def descr_delslice(self, space, w_start, w_stop):
-        self.descr_delitem(space, space.newslice(w_start, w_stop, space.w_None))
+        self.descr_delitem(space, space.newslice(w_start, w_stop,
+                                                 space.w_None))
 
     def descr_add(self, space, w_other):
         raise NotImplementedError
@@ -478,7 +484,7 @@
 W_ArrayBase.typedef = TypeDef(
     'array',
     __new__ = interp2app(w_array),
-    __module__   = 'array',
+    __module__ = 'array',
 
     __len__ = interp2app(W_ArrayBase.descr_len),
     __eq__ = interp2app(W_ArrayBase.descr_eq),
@@ -534,7 +540,8 @@
 
 
 class TypeCode(object):
-    def __init__(self, itemtype, unwrap, canoverflow=False, signed=False, method='__int__'):
+    def __init__(self, itemtype, unwrap, canoverflow=False, signed=False,
+                 method='__int__'):
         self.itemtype = itemtype
         self.bytes = rffi.sizeof(itemtype)
         self.arraytype = lltype.Array(itemtype, hints={'nolength': True})
@@ -547,7 +554,7 @@
         if self.canoverflow:
             assert self.bytes <= rffi.sizeof(rffi.ULONG)
             if self.bytes == rffi.sizeof(rffi.ULONG) and not signed and \
-                   self.unwrap == 'int_w':
+                    self.unwrap == 'int_w':
                 # Treat this type as a ULONG
                 self.unwrap = 'bigint_w'
                 self.canoverflow = False
@@ -619,14 +626,15 @@
             try:
                 item = unwrap(w_item)
             except OperationError, e:
-                if isinstance(w_item, W_FloatObject): # Odd special case from cpython
+                if isinstance(w_item, W_FloatObject):
+                    # Odd special case from cpython
                     raise
                 if mytype.method != '' and e.match(space, space.w_TypeError):
                     try:
                         item = unwrap(space.call_method(w_item, mytype.method))
                     except OperationError:
                         msg = 'array item must be ' + mytype.unwrap[:-2]
-                        raise OperationError(space.w_TypeError, space.wrap(msg))
+                        raise operationerrfmt(space.w_TypeError, msg)
                 else:
                     raise
             if mytype.unwrap == 'bigint_w':
@@ -681,14 +689,13 @@
                         some = 0
                     self.allocated = size + some
                     if zero:
-                        new_buffer = lltype.malloc(mytype.arraytype,
-                                                   self.allocated, flavor='raw',
-                                                   add_memory_pressure=True,
-                                                   zero=True)
+                        new_buffer = lltype.malloc(
+                            mytype.arraytype, self.allocated, flavor='raw',
+                            add_memory_pressure=True, zero=True)
                     else:
-                        new_buffer = lltype.malloc(mytype.arraytype,
-                                                   self.allocated, flavor='raw',
-                                                   add_memory_pressure=True)
+                        new_buffer = lltype.malloc(
+                            mytype.arraytype, self.allocated, flavor='raw',
+                            add_memory_pressure=True)
                         for i in range(min(size, self.len)):
                             new_buffer[i] = self.buffer[i]
                 else:
@@ -882,9 +889,9 @@
             if i >= j:
                 return None
             oldbuffer = self.buffer
-            self.buffer = lltype.malloc(mytype.arraytype,
-                          max(self.len - (j - i), 0), flavor='raw',
-                          add_memory_pressure=True)
+            self.buffer = lltype.malloc(
+                mytype.arraytype, max(self.len - (j - i), 0), flavor='raw',
+                add_memory_pressure=True)
             if i:
                 rffi.c_memcpy(
                     rffi.cast(rffi.VOIDP, self.buffer),
diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py
--- a/pypy/module/array/test/test_array.py
+++ b/pypy/module/array/test/test_array.py
@@ -171,6 +171,9 @@
         a = self.array('c')
         a.fromstring('Hi!')
         assert a[0] == 'H' and a[1] == 'i' and a[2] == '!' and len(a) == 3
+        a = self.array('c')
+        a.fromstring('')
+        assert not len(a)
 
         for t in 'bBhHiIlLfd':
             a = self.array(t)
diff --git a/pypy/module/binascii/interp_crc32.py b/pypy/module/binascii/interp_crc32.py
--- a/pypy/module/binascii/interp_crc32.py
+++ b/pypy/module/binascii/interp_crc32.py
@@ -1,17 +1,12 @@
 from pypy.interpreter.gateway import unwrap_spec
+from rpython.rtyper.lltypesystem import rffi
 from rpython.rlib.rarithmetic import r_uint, intmask
-from rpython.rtyper.lltypesystem import rffi
-from rpython.rlib.rzipfile import crc_32_tab
+from rpython.rlib import rzipfile
 
 @unwrap_spec(data='bufferstr', oldcrc='truncatedint_w')
 def crc32(space, data, oldcrc=0):
     "Compute the CRC-32 incrementally."
 
-    crc = r_uint(rffi.cast(rffi.UINT, ~oldcrc))   # signed => 32-bit unsigned
-
-    # in the following loop, we have always 0 <= crc < 2**32
-    for c in data:
-        crc = crc_32_tab[(crc & 0xff) ^ ord(c)] ^ (crc >> 8)
-
-    crc = ~intmask(rffi.cast(rffi.INT, crc))   # unsigned => 32-bit signed
-    return space.wrap(crc)
+    crc = rzipfile.crc32(data, r_uint(oldcrc))
+    crc = rffi.cast(rffi.INT, crc)    # unsigned => 32-bit signed
+    return space.wrap(intmask(crc))
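
The rewritten crc32() above delegates to rzipfile.crc32; for reference, the table-driven loop that
the removed lines implemented looks like this in plain Python (an illustration of the algorithm,
returning the unsigned 32-bit value rather than the signed int the module wraps):

    def _make_crc32_table():
        # Standard reflected CRC-32 table, polynomial 0xEDB88320.
        table = []
        for i in range(256):
            c = i
            for _ in range(8):
                c = ((c >> 1) ^ 0xEDB88320) if (c & 1) else (c >> 1)
            table.append(c)
        return table

    CRC32_TABLE = _make_crc32_table()

    def crc32(data, oldcrc=0):
        # Incremental update: fold each byte into the running CRC via the table.
        crc = oldcrc ^ 0xFFFFFFFF
        for ch in data:
            crc = CRC32_TABLE[(crc ^ ord(ch)) & 0xFF] ^ (crc >> 8)
        return crc ^ 0xFFFFFFFF
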
diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -29,7 +29,7 @@
 #define PY_VERSION		"2.7.3"
 
 /* PyPy version as a string */
-#define PYPY_VERSION "2.2.1-alpha0"
+#define PYPY_VERSION "2.3.0-alpha0"
 
 /* Subversion Revision number of this file (not of the repository).
  * Empty since Mercurial migration. */
diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py
--- a/pypy/module/marshal/test/test_marshalimpl.py
+++ b/pypy/module/marshal/test/test_marshalimpl.py
@@ -80,7 +80,7 @@
         #
         u = interp_marshal.StringUnmarshaller(space, space.wrap(expected))
         w_long = u.load_w_obj()
-        assert space.eq_w(w_long, w_obj) is True
+        assert space.eq_w(w_long, w_obj)
 
     for sign in [1L, -1L]:
         for i in range(100):
diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py
--- a/pypy/module/micronumpy/arrayimpl/concrete.py
+++ b/pypy/module/micronumpy/arrayimpl/concrete.py
@@ -36,10 +36,13 @@
         return backstrides
 
     def getitem(self, index):
-        return self.dtype.getitem(self, index)
+        return self.dtype.itemtype.read(self, index, 0)
+
+    def getitem_bool(self, index):
+        return self.dtype.itemtype.read_bool(self, index, 0)
 
     def setitem(self, index, value):
-        self.dtype.setitem(self, index, value)
+        self.dtype.itemtype.store(self, index, 0, value)
 
     def setslice(self, space, arr):
         impl = arr.implementation
@@ -52,7 +55,7 @@
         loop.setslice(space, shape, self, impl)
 
     def get_size(self):
-        return self.size // self.dtype.itemtype.get_element_size()
+        return self.size // self.dtype.get_size()
 
     def get_storage_size(self):
         return self.size
@@ -77,7 +80,7 @@
                 return scalar.Scalar(self.dtype, self.getitem(0))
             return None
 
-    def get_view(self, orig_array, dtype, new_shape):
+    def get_view(self, space, orig_array, dtype, new_shape):
         strides, backstrides = support.calc_strides(new_shape, dtype,
                                                     self.order)
         return SliceArray(self.start, strides, backstrides, new_shape,
@@ -208,7 +211,15 @@
                     "field named %s not found" % idx))
             return RecordChunk(idx)
         if (space.isinstance_w(w_idx, space.w_int) or
-            space.isinstance_w(w_idx, space.w_slice)):
+                space.isinstance_w(w_idx, space.w_slice)):
+            return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))])
+        elif isinstance(w_idx, W_NDimArray) and \
+                isinstance(w_idx.implementation, scalar.Scalar):
+            w_idx = w_idx.get_scalar_value().item(space)
+            if not space.isinstance_w(w_idx, space.w_int) and \
+                    not space.isinstance_w(w_idx, space.w_bool):
+                raise OperationError(space.w_IndexError, space.wrap(
+                    "arrays used as indices must be of integer (or boolean) type"))
             return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))])
         elif space.is_w(w_idx, space.w_None):
             return Chunks([NewAxisChunk()])
@@ -268,7 +279,7 @@
     def create_dot_iter(self, shape, skip):
         r = calculate_dot_strides(self.get_strides(), self.get_backstrides(),
                                   shape, skip)
-        return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape)
+        return iter.MultiDimViewIterator(self, self.start, r[0], r[1], shape)
 
     def swapaxes(self, space, orig_arr, axis1, axis2):
         shape = self.get_shape()[:]
@@ -331,21 +342,24 @@
                 support.product(shape) > support.product(self.get_shape()):
             r = calculate_broadcast_strides(self.get_strides(),
                                             self.get_backstrides(),
-                                            self.get_shape(), shape, backward_broadcast)
-            return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape)
-
+                                            self.get_shape(), shape,
+                                            backward_broadcast)
+            return iter.MultiDimViewIterator(self, self.start,
+                                             r[0], r[1], shape)
         if not require_index:
             return iter.ConcreteArrayIterator(self)
-        else:
-            if len(self.get_shape()) == 1:
-                return iter.OneDimViewIterator(self, self.dtype, self.start,
-                        self.get_strides(), self.get_shape())
-            else:
-                return iter.MultiDimViewIterator(self, self.dtype, self.start,
-                        self.get_strides(), self.get_backstrides(), self.get_shape())
+        if len(self.get_shape()) == 1:
+            return iter.OneDimViewIterator(self, self.start,
+                                           self.get_strides(),
+                                           self.get_shape())
+        return iter.MultiDimViewIterator(self, self.start,
+                                         self.get_strides(),
+                                         self.get_backstrides(),
+                                         self.get_shape())
 
     def fill(self, box):
-        self.dtype.fill(self.storage, box, 0, self.size)
+        self.dtype.itemtype.fill(self.storage, self.dtype.get_size(),
+                                 box, 0, self.size, 0)
 
     def set_shape(self, space, orig_array, new_shape):
         strides, backstrides = support.calc_strides(new_shape, self.dtype,
@@ -399,7 +413,7 @@
         self.storage = parent.storage
         self.order = parent.order
         self.dtype = dtype
-        self.size = support.product(shape) * self.dtype.itemtype.get_element_size()
+        self.size = support.product(shape) * self.dtype.get_size()
         self.start = start
         self.orig_arr = orig_arr
 
@@ -416,14 +430,16 @@
                                             self.get_backstrides(),
                                             self.get_shape(), shape,
                                             backward_broadcast)
-            return iter.MultiDimViewIterator(self.parent, self.dtype,
-                                             self.start, r[0], r[1], shape)
+            return iter.MultiDimViewIterator(self, self.start,
+                                             r[0], r[1], shape)
         if len(self.get_shape()) == 1:
-            return iter.OneDimViewIterator(self.parent, self.dtype, self.start,
-                    self.get_strides(), self.get_shape())
-        return iter.MultiDimViewIterator(self.parent, self.dtype, self.start,
-                                    self.get_strides(),
-                                    self.get_backstrides(), self.get_shape())
+            return iter.OneDimViewIterator(self, self.start,
+                                           self.get_strides(),
+                                           self.get_shape())
+        return iter.MultiDimViewIterator(self, self.start,
+                                         self.get_strides(),
+                                         self.get_backstrides(),
+                                         self.get_shape())
 
     def set_shape(self, space, orig_array, new_shape):
         if len(self.get_shape()) < 2 or self.size == 0:
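
The hunks above drop the dtype argument from the view-iterator constructors and route fill() through the itemtype with an explicit element size. A minimal sketch of what the new constructor shape implies, using a hypothetical plain-Python stand-in rather than PyPy's actual classes: the iterator keeps the array implementation it is given and can read the dtype from it, so callers no longer pass the dtype separately.

    # Sketch only (hypothetical name, not PyPy's code): the iterator stores the
    # array implementation and derives the dtype from it instead of taking it
    # as a separate constructor argument.
    class MultiDimViewIteratorSketch(object):
        def __init__(self, array, start, strides, backstrides, shape):
            self.array = array
            self.dtype = array.dtype   # previously an explicit argument
            self.start = start
            self.strides = strides
            self.backstrides = backstrides
            self.shape = shape
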
diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py
--- a/pypy/module/micronumpy/arrayimpl/scalar.py
+++ b/pypy/module/micronumpy/arrayimpl/scalar.py
@@ -68,9 +68,15 @@
     def transpose(self, _):
         return self
 
-    def get_view(self, orig_array, dtype, new_shape):
+    def get_view(self, space, orig_array, dtype, new_shape):
         scalar = Scalar(dtype)
-        scalar.value = self.value.convert_to(dtype)
+        if dtype.is_str_or_unicode():
+            scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str()))
+        elif dtype.is_record_type():
+            raise OperationError(space.w_NotImplementedError, space.wrap(
+                "viewing scalar as record not implemented"))
+        else:
+            scalar.value = dtype.itemtype.runpack_str(space, self.value.raw_str())
         return scalar
 
     def get_real(self, orig_array):
@@ -123,20 +129,24 @@
                             )
 
     def descr_getitem(self, space, _, w_idx):
+        if space.isinstance_w(w_idx, space.w_tuple):
+            if space.len_w(w_idx) == 0:
+                return self.get_scalar_value()
         raise OperationError(space.w_IndexError,
-                             space.wrap("scalars cannot be indexed"))
+                             space.wrap("0-d arrays can't be indexed"))
 
     def getitem_index(self, space, idx):
         raise OperationError(space.w_IndexError,
-                             space.wrap("scalars cannot be indexed"))
+                             space.wrap("0-d arrays can't be indexed"))
 
     def descr_setitem(self, space, _, w_idx, w_val):
         raise OperationError(space.w_IndexError,
-                             space.wrap("scalars cannot be indexed"))
+                             space.wrap("0-d arrays can't be indexed"))
 
     def setitem_index(self, space, idx, w_val):
         raise OperationError(space.w_IndexError,
-                             space.wrap("scalars cannot be indexed"))
+                             space.wrap("0-d arrays can't be indexed"))
+
     def set_shape(self, space, orig_array, new_shape):
         if not new_shape:
             return self
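
The descr_getitem change above gives 0-d arrays NumPy's behaviour: indexing with an empty tuple hands back the contained scalar, while any other index raises IndexError with the new message. A rough illustration of the intended user-visible behaviour (not a test from this commit; it assumes a NumPy-compatible numpy module):

    import numpy as np

    a = np.array(42)        # 0-d array, backed by the Scalar implementation
    print(a[()])            # the empty tuple is accepted and returns 42
    try:
        a[0]                # anything else is rejected
    except IndexError as e:
        print(e)            # "0-d arrays can't be indexed"
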
diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py
--- a/pypy/module/micronumpy/conversion_utils.py
+++ b/pypy/module/micronumpy/conversion_utils.py
@@ -1,6 +1,27 @@
 from pypy.interpreter.error import OperationError
 from pypy.module.micronumpy.constants import *
 
+
+def byteorder_converter(space, new_order):
+    endian = new_order[0]
+    if endian not in (NPY_BIG, NPY_LITTLE, NPY_NATIVE, NPY_IGNORE, NPY_SWAP):
+        ch = endian
+        if ch in ('b', 'B'):
+            endian = NPY_BIG
+        elif ch in ('l', 'L'):
+            endian = NPY_LITTLE
+        elif ch in ('n', 'N'):
+            endian = NPY_NATIVE
+        elif ch in ('i', 'I'):
+            endian = NPY_IGNORE
+        elif ch in ('s', 'S'):
+            endian = NPY_SWAP
+        else:
+            raise OperationError(space.w_ValueError, space.wrap(
+                "%s is an unrecognized byteorder" % new_order))
+    return endian
+
+
 def clipmode_converter(space, w_mode):
     if space.is_none(w_mode):
         return NPY_RAISE
@@ -19,6 +40,7 @@
     raise OperationError(space.w_TypeError,
                          space.wrap("clipmode not understood"))
 
+
 def order_converter(space, w_order, default):
     if space.is_none(w_order):
         return default
@@ -40,3 +62,25 @@
         else:
             raise OperationError(space.w_TypeError, space.wrap(
                 "order not understood"))
+
+
+def multi_axis_converter(space, w_axis, ndim):
+    if space.is_none(w_axis):
+        return [True] * ndim
+    out = [False] * ndim
+    if not space.isinstance_w(w_axis, space.w_tuple):
+        w_axis = space.newtuple([w_axis])
+    for w_item in space.fixedview(w_axis):
+        item = space.int_w(w_item)
+        axis = item
+        if axis < 0:
+            axis += ndim
+        if axis < 0 or axis >= ndim:
+            raise OperationError(space.w_ValueError, space.wrap(
+                "'axis' entry %d is out of bounds [-%d, %d)" %
+                (item, ndim, ndim)))
+        if out[axis]:
+            raise OperationError(space.w_ValueError, space.wrap(
+                "duplicate value in 'axis'"))
+        out[axis] = True
+    return out
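
The two converters added above normalise user input: byteorder_converter maps the one-letter spellings ('b'/'B', 'l'/'L', 'n'/'N', 'i'/'I', 's'/'S') to the NPY_* byte-order codes (the descr_newbyteorder method added further down in this diff goes through it), and multi_axis_converter turns an axis argument into a per-dimension boolean mask. A small stand-alone sketch of the mask logic with the object-space plumbing stripped out (illustrative only, not the committed code):

    def multi_axis_mask(axis, ndim):
        # Mirrors multi_axis_converter above in plain Python: None selects
        # every axis; an int or a tuple of ints selects specific axes.
        if axis is None:
            return [True] * ndim
        if not isinstance(axis, tuple):
            axis = (axis,)
        out = [False] * ndim
        for item in axis:
            ax = item + ndim if item < 0 else item
            if ax < 0 or ax >= ndim:
                raise ValueError("'axis' entry %d is out of bounds [-%d, %d)"
                                 % (item, ndim, ndim))
            if out[ax]:
                raise ValueError("duplicate value in 'axis'")
            out[ax] = True
        return out

    print(multi_axis_mask(None, 3))     # [True, True, True]
    print(multi_axis_mask((0, -1), 3))  # [True, False, True]
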
diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py
--- a/pypy/module/micronumpy/interp_arrayops.py
+++ b/pypy/module/micronumpy/interp_arrayops.py
@@ -106,16 +106,26 @@
     args_w = [convert_to_array(space, w_arg) for w_arg in args_w]
     dtype = args_w[0].get_dtype()
     shape = args_w[0].get_shape()[:]
-    _axis = axis
+    ndim = len(shape)
+    orig_axis = axis
     if axis < 0:
-        _axis = len(shape) + axis
+        axis = ndim + axis
+    if ndim == 1 and axis != 0:
+        axis = 0
+    if axis < 0 or axis >= ndim:
+        raise operationerrfmt(space.w_IndexError,
+            "axis %d out of bounds [0, %d)", orig_axis, ndim)
     for arr in args_w[1:]:
+        if len(arr.get_shape()) != ndim:
+            raise OperationError(space.w_ValueError, space.wrap(
+                "all the input arrays must have same number of dimensions"))
         for i, axis_size in enumerate(arr.get_shape()):
-            if len(arr.get_shape()) != len(shape) or (i != _axis and axis_size != shape[i]):
+            if i == axis:
+                shape[i] += axis_size
+            elif axis_size != shape[i]:
                 raise OperationError(space.w_ValueError, space.wrap(
-                    "all the input arrays must have same number of dimensions"))
-            elif i == _axis:
-                shape[i] += axis_size
+                    "all the input array dimensions except for the "
+                    "concatenation axis must match exactly"))
         a_dt = arr.get_dtype()
         if dtype.is_record_type() and a_dt.is_record_type():
             # Record types must match
@@ -129,19 +139,17 @@
                         space.wrap("invalid type promotion"))
         dtype = interp_ufuncs.find_binop_result_dtype(space, dtype,
                                                       arr.get_dtype())
-        if _axis < 0 or len(arr.get_shape()) <= _axis:
-            raise operationerrfmt(space.w_IndexError, "axis %d out of bounds [0, %d)", axis, len(shape))
     # concatenate does not handle ndarray subtypes, it always returns a ndarray
     res = W_NDimArray.from_shape(space, shape, dtype, 'C')
     chunks = [Chunk(0, i, 1, i) for i in shape]
     axis_start = 0
     for arr in args_w:
-        if arr.get_shape()[_axis] == 0:
+        if arr.get_shape()[axis] == 0:
             continue
-        chunks[_axis] = Chunk(axis_start, axis_start + arr.get_shape()[_axis], 1,
-                             arr.get_shape()[_axis])
+        chunks[axis] = Chunk(axis_start, axis_start + arr.get_shape()[axis], 1,
+                             arr.get_shape()[axis])
         Chunks(chunks).apply(space, res).implementation.setslice(space, arr)
-        axis_start += arr.get_shape()[_axis]
+        axis_start += arr.get_shape()[axis]
     return res
 
 @unwrap_spec(repeats=int)
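
With the rewritten validation above, concatenate normalises a negative axis, requires every input to have the same rank, sums the sizes along the concatenation axis, and rejects any mismatch on the remaining axes with the NumPy-style message. A rough illustration of the intended behaviour (not part of the commit; it assumes a NumPy-compatible numpy module):

    import numpy as np

    a = np.zeros((2, 3))
    b = np.zeros((2, 5))
    print(np.concatenate([a, b], axis=1).shape)   # (2, 8): sizes add along axis 1
    try:
        np.concatenate([a, b], axis=0)            # 3 != 5 on a non-concatenation axis
    except ValueError as e:
        print(e)   # "all the input array dimensions except for the
                   # concatenation axis must match exactly"
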
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py
--- a/pypy/module/micronumpy/interp_boxes.py
+++ b/pypy/module/micronumpy/interp_boxes.py
@@ -255,7 +255,10 @@
         return convert_to_array(space, w_values)
 
     @unwrap_spec(decimals=int)
-    def descr_round(self, space, decimals=0):
+    def descr_round(self, space, decimals=0, w_out=None):
+        if not space.is_none(w_out):
+            raise OperationError(space.w_NotImplementedError, space.wrap(
+                "out not supported"))
         v = self.convert_to(self.get_dtype(space))
         return self.get_dtype(space).itemtype.round(v, decimals)
 
@@ -269,11 +272,19 @@
         from pypy.module.micronumpy.interp_dtype import W_Dtype
         dtype = space.interp_w(W_Dtype,
             space.call_function(space.gettypefor(W_Dtype), w_dtype))
+        if dtype.get_size() == 0:
+            raise OperationError(space.w_TypeError, space.wrap(
+                "data-type must not be 0-sized"))
         if dtype.get_size() != self.get_dtype(space).get_size():
             raise OperationError(space.w_ValueError, space.wrap(
                 "new type not compatible with array."))
-        raise OperationError(space.w_NotImplementedError, space.wrap(
-            "view not implelemnted yet"))
+        if dtype.is_str_or_unicode():
+            return dtype.coerce(space, space.wrap(self.raw_str()))
+        elif dtype.is_record_type():
+            raise OperationError(space.w_NotImplementedError, space.wrap(
+                "viewing scalar as record not implemented"))
+        else:
+            return dtype.itemtype.runpack_str(space, self.raw_str())
 
     def descr_self(self, space):
         return self
@@ -281,6 +292,9 @@
     def descr_get_dtype(self, space):
         return self.get_dtype(space)
 
+    def descr_get_size(self, space):
+        return space.wrap(1)
+
     def descr_get_itemsize(self, space):
         return self.get_dtype(space).descr_get_itemsize(space)
 
@@ -407,6 +421,9 @@
     def get_dtype(self, space):
         return self.arr.dtype
 
+    def raw_str(self):
+        return self.arr.dtype.itemtype.to_str(self)
+
 class W_VoidBox(W_FlexibleBox):
     def descr_getitem(self, space, w_item):
         if space.isinstance_w(w_item, space.w_basestring):
@@ -551,6 +568,7 @@
     copy = interp2app(W_GenericBox.descr_copy),
 
     dtype = GetSetProperty(W_GenericBox.descr_get_dtype),
+    size = GetSetProperty(W_GenericBox.descr_get_size),
     itemsize = GetSetProperty(W_GenericBox.descr_get_itemsize),
     nbytes = GetSetProperty(W_GenericBox.descr_get_itemsize),
     shape = GetSetProperty(W_GenericBox.descr_get_shape),
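
For scalar boxes, the hunks above add a size attribute that is always 1, let round() accept (and for now reject) an out argument, and implement view() by reinterpreting the box's raw bytes when the target dtype has the same width. A rough illustration of the resulting behaviour (not a test from the commit; it assumes a NumPy-compatible numpy module):

    import numpy as np

    x = np.float32(1.0)
    print(x.size)              # 1, via the new size property
    print(x.itemsize)          # 4
    print(x.nbytes)            # 4, mapped to itemsize in the typedef above
    print(x.view(np.int32))    # 1065353216, the same four bytes read as int32 (0x3f800000)
    try:
        x.view(np.float64)     # a different itemsize is refused
    except ValueError as e:
        print(e)               # "new type not compatible with array."
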
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
--- a/pypy/module/micronumpy/interp_dtype.py
+++ b/pypy/module/micronumpy/interp_dtype.py
@@ -3,20 +3,22 @@
 from pypy.interpreter.error import OperationError, operationerrfmt
 from pypy.interpreter.gateway import interp2app, unwrap_spec
 from pypy.interpreter.typedef import (TypeDef, GetSetProperty,
-    interp_attrproperty, interp_attrproperty_w)
+                                      interp_attrproperty, interp_attrproperty_w)
 from pypy.module.micronumpy import types, interp_boxes, base
 from rpython.rlib.objectmodel import specialize
 from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong
 from rpython.rtyper.lltypesystem import rffi
 from rpython.rlib import jit
+from pypy.module.micronumpy.conversion_utils import byteorder_converter
 from pypy.module.micronumpy.constants import *
 
 
 def decode_w_dtype(space, w_dtype):
     if space.is_none(w_dtype):
         return None
-    return space.interp_w(W_Dtype,
-          space.call_function(space.gettypefor(W_Dtype), w_dtype))
+    return space.interp_w(
+        W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype))
+
 
 @jit.unroll_safe
 def dtype_agreement(space, w_arr_list, shape, out=None):
@@ -33,11 +35,14 @@
     out = base.W_NDimArray.from_shape(space, shape, dtype)
     return out
 
+
 class W_Dtype(W_Root):
-    _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", "w_box_type", "byteorder", "float_type"]
+    _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char",
+                          "w_box_type", "byteorder", "size?", "float_type",
+                          "fields?", "fieldnames?", "shape", "subdtype", "base"]
 
     def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY_NATIVE,
-                 alternate_constructors=[], aliases=[], float_type=None,
+                 size=1, alternate_constructors=[], aliases=[], float_type=None,
                  fields=None, fieldnames=None, shape=[], subdtype=None):
         self.itemtype = itemtype
         self.num = num
@@ -46,6 +51,7 @@
         self.char = char
         self.w_box_type = w_box_type
         self.byteorder = byteorder
+        self.size = size
         self.alternate_constructors = alternate_constructors
         self.aliases = aliases
         self.float_type = float_type
@@ -77,19 +83,6 @@
     def coerce(self, space, w_item):
         return self.itemtype.coerce(space, self, w_item)
 
-    def getitem(self, arr, i):
-        item = self.itemtype.read(arr, i, 0)
-        return item
-
-    def getitem_bool(self, arr, i):
-        return self.itemtype.read_bool(arr, i, 0)
-
-    def setitem(self, arr, i, box):
-        self.itemtype.store(arr, i, 0, box)
-
-    def fill(self, storage, box, start, stop):
-        self.itemtype.fill(storage, self.get_size(), box, start, stop, 0)
-
     def is_int_type(self):
         return (self.kind == NPY_SIGNEDLTR or self.kind == NPY_UNSIGNEDLTR or
                 self.kind == NPY_GENBOOLLTR)
@@ -101,7 +94,7 @@
         return self.kind == NPY_COMPLEXLTR
 
     def is_float_type(self):
-        return (self.kind == NPY_FLOATINGLTR or self.float_type is not None)
+        return self.kind == NPY_FLOATINGLTR or self.kind == NPY_COMPLEXLTR
 
     def is_bool_type(self):
         return self.kind == NPY_GENBOOLLTR
@@ -122,7 +115,7 @@
         return self.byteorder in (NPY_NATIVE, NPY_NATBYTE)
 
     def get_size(self):
-        return self.itemtype.get_element_size()
+        return self.size * self.itemtype.get_element_size()
 
     def get_name(self):
         if self.char == 'S':
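
With the size field introduced in this patch, get_size() (and therefore itemsize) is the element size multiplied by the dtype's size count, which is what lets string, void and record dtypes report their full width without carrying a per-instance itemtype. Illustrative expectations (assuming a NumPy-compatible numpy module):

    import numpy as np

    print(np.dtype('S5').itemsize)                        # 5: 1-byte elements, size 5
    print(np.dtype([('a', 'i4'), ('b', 'f8')]).itemsize)  # 12: 4 + 8, accumulated per field
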
@@ -136,7 +129,7 @@
         return space.wrap("dtype('%s')" % self.get_name())
 
     def descr_get_itemsize(self, space):
-        return space.wrap(self.itemtype.get_element_size())
+        return space.wrap(self.get_size())
 
     def descr_get_alignment(self, space):
         return space.wrap(self.itemtype.alignment)
@@ -150,7 +143,7 @@
         if basic == NPY_UNICODELTR:
             size >>= 2
             endian = NPY_NATBYTE
-        elif size <= 1:
+        elif size // (self.size or 1) <= 1:
             endian = NPY_IGNORE
         else:
             endian = self.byteorder
@@ -158,6 +151,14 @@
                 endian = NPY_NATBYTE
         return space.wrap("%s%s%s" % (endian, basic, size))
 
+    def descr_get_descr(self, space):
+        if not self.is_record_type():
+            return space.newlist([space.newtuple([space.wrap(""),
+                                                  self.descr_get_str(space)])])
+        else:
+            raise OperationError(space.w_NotImplementedError, space.wrap(
+                "descr not implemented for record types"))
+
     def descr_get_base(self, space):
         return space.wrap(self.base)
 
@@ -196,7 +197,6 @@
             self.fields = None
         else:
             self.fields = {}
-            ofs_and_items = []
             size = 0
             for key in space.listview(w_fields):
                 value = space.getitem(w_fields, key)
@@ -207,11 +207,11 @@
                 offset = space.int_w(space.getitem(value, space.wrap(1)))
                 self.fields[space.str_w(key)] = offset, dtype
 
-                ofs_and_items.append((offset, dtype.itemtype))
-                size += dtype.itemtype.get_element_size()
+                size += dtype.get_size()
 
-            self.itemtype = types.RecordType(ofs_and_items, size)
-            self.name = "void" + str(8 * self.itemtype.get_element_size())
+            self.itemtype = types.RecordType()
+            self.size = size
+            self.name = "void" + str(8 * self.get_size())
 
     def descr_get_names(self, space):
         if self.fieldnames is None:
@@ -232,6 +232,9 @@
                         raise
                     break
 
+    def descr_get_hasobject(self, space):
+        return space.w_False
+
     def descr_getitem(self, space, w_item):
         if self.fields is None:
             raise OperationError(space.w_KeyError, space.wrap(
@@ -263,7 +266,7 @@
         w_class = space.type(self)
 
         kind = self.kind
-        elemsize = self.itemtype.get_element_size()
+        elemsize = self.get_size()
         builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)])
 
         version = space.wrap(3)
@@ -308,11 +311,24 @@
         fields = space.getitem(w_data, space.wrap(4))
         self.set_fields(space, fields)
 
+    @unwrap_spec(new_order=str)
+    def descr_newbyteorder(self, space, new_order=NPY_SWAP):
+        newendian = byteorder_converter(space, new_order)
+        endian = self.byteorder
+        if endian != NPY_IGNORE:
+            if newendian == NPY_SWAP:
+                endian = NPY_OPPBYTE if self.is_native() else NPY_NATBYTE
+            elif newendian != NPY_IGNORE:
+                endian = newendian
+        itemtype = self.itemtype.__class__(endian in (NPY_NATIVE, NPY_NATBYTE))
+        return W_Dtype(itemtype, self.num, self.kind, self.name, self.char,
+                       self.w_box_type, endian, size=self.size)
+
+
 def dtype_from_list(space, w_lst):
     lst_w = space.listview(w_lst)
     fields = {}
     offset = 0
-    ofs_and_items = []
     fieldnames = []
     for w_elem in lst_w:
         size = 1
@@ -320,7 +336,7 @@
         if space.len_w(w_elem) == 3:
             w_fldname, w_flddesc, w_shape = space.fixedview(w_elem)
             if not base.issequence_w(space, w_shape):
-                w_shape = space.newtuple([w_shape,])
+                w_shape = space.newtuple([w_shape])
         else:
             w_fldname, w_flddesc = space.fixedview(w_elem, 2)
         subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape)
@@ -329,27 +345,31 @@
             raise OperationError(space.w_ValueError, space.wrap("two fields with the same name"))
         assert isinstance(subdtype, W_Dtype)
         fields[fldname] = (offset, subdtype)
-        ofs_and_items.append((offset, subdtype.itemtype))
-        offset += subdtype.itemtype.get_element_size() * size
+        offset += subdtype.get_size() * size
         fieldnames.append(fldname)
-    itemtype = types.RecordType(ofs_and_items, offset)
-    return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, "void" + str(8 * itemtype.get_element_size()),
-                   NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), fields=fields,
-                   fieldnames=fieldnames)
+    itemtype = types.RecordType()
+    return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR,
+                   "void" + str(8 * offset * itemtype.get_element_size()),
+                   NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox),
+                   fields=fields, fieldnames=fieldnames, size=offset)
+
 
 def dtype_from_dict(space, w_dict):
     raise OperationError(space.w_NotImplementedError, space.wrap(
         "dtype from dict"))
 
+
 def dtype_from_spec(space, name):
         raise OperationError(space.w_NotImplementedError, space.wrap(
             "dtype from spec"))
 
+
 def descr__new__(space, w_subtype, w_dtype, w_align=None, w_copy=None, w_shape=None):
     # w_align and w_copy are necessary for pickling
     cache = get_dtype_cache(space)
 
-    if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or space.len_w(w_shape) > 0):
+    if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or
+                                space.len_w(w_shape) > 0):
         subdtype = descr__new__(space, w_subtype, w_dtype, w_align, w_copy)
         assert isinstance(subdtype, W_Dtype)
         size = 1
@@ -360,8 +380,11 @@
             dim = space.int_w(w_dim)
             shape.append(dim)
             size *= dim
-        return W_Dtype(types.VoidType(subdtype.itemtype.get_element_size() * size), NPY_VOID, NPY_VOIDLTR, "void" + str(8 * subdtype.itemtype.get_element_size() * size),
-                    NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), shape=shape, subdtype=subdtype)
+        return W_Dtype(types.VoidType(), NPY_VOID, NPY_VOIDLTR,
+                       "void" + str(8 * subdtype.get_size() * size),
+                       NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox),
+                       shape=shape, subdtype=subdtype,
+                       size=subdtype.get_size() * size)
 
     if space.is_none(w_dtype):
         return cache.w_float64dtype
@@ -375,10 +398,10 @@
             return cache.dtypes_by_name[name]
         except KeyError:
             pass
-        if name[0] in 'VSUc' or name[0] in '<>=' and name[1] in 'VSUc':
+        if name[0] in 'VSUc' or name[0] in '<>=|' and name[1] in 'VSUc':
             return variable_dtype(space, name)
         raise OperationError(space.w_TypeError, space.wrap(
-                       "data type %s not understood" % name))
+            "data type %s not understood" % name))
     elif space.isinstance_w(w_dtype, space.w_list):
         return dtype_from_list(space, w_dtype)
     elif space.isinstance_w(w_dtype, space.w_tuple):
@@ -413,6 +436,7 @@
 
     __reduce__ = interp2app(W_Dtype.descr_reduce),
     __setstate__ = interp2app(W_Dtype.descr_setstate),
+    newbyteorder = interp2app(W_Dtype.descr_newbyteorder),
 
     type = interp_attrproperty_w("w_box_type", cls=W_Dtype),
     kind = interp_attrproperty("kind", cls=W_Dtype),
@@ -430,12 +454,14 @@
     isnative = GetSetProperty(W_Dtype.descr_get_isnative),
     fields = GetSetProperty(W_Dtype.descr_get_fields),
     names = GetSetProperty(W_Dtype.descr_get_names),
+    hasobject = GetSetProperty(W_Dtype.descr_get_hasobject),
+    descr = GetSetProperty(W_Dtype.descr_get_descr),
 )
 W_Dtype.typedef.acceptable_as_base_class = False
 
 
 def variable_dtype(space, name):
-    if name[0] in '<>=':
+    if name[0] in '<>=|':
         name = name[1:]
     char = name[0]
     if len(name) == 1:
@@ -450,17 +476,17 @@
         size = 1
 
     if char == NPY_STRINGLTR:
-        itemtype = types.StringType(size)
+        itemtype = types.StringType()
         basename = 'string'
         num = NPY_STRING
         w_box_type = space.gettypefor(interp_boxes.W_StringBox)
     elif char == NPY_VOIDLTR:
-        itemtype = types.VoidType(size)
+        itemtype = types.VoidType()
         basename = 'void'
         num = NPY_VOID

