[pypy-commit] pypy stm-thread-2: hg merge default

arigo noreply at buildbot.pypy.org
Wed Oct 3 16:45:21 CEST 2012


Author: Armin Rigo <arigo at tunes.org>
Branch: stm-thread-2
Changeset: r57774:c3dbd0810819
Date: 2012-10-03 14:46 +0000
http://bitbucket.org/pypy/pypy/changeset/c3dbd0810819/

Log:	hg merge default

diff too long, truncating to 2000 out of 14552 lines

diff --git a/lib-python/2.7/test/test_csv.py b/lib-python/2.7/test/test_csv.py
--- a/lib-python/2.7/test/test_csv.py
+++ b/lib-python/2.7/test/test_csv.py
@@ -20,7 +20,8 @@
     """
     def _test_arg_valid(self, ctor, arg):
         self.assertRaises(TypeError, ctor)
-        self.assertRaises(TypeError, ctor, None)
+        # PyPy gets an AttributeError instead of a TypeError
+        self.assertRaises((TypeError, AttributeError), ctor, None)
         self.assertRaises(TypeError, ctor, arg, bad_attr = 0)
         self.assertRaises(TypeError, ctor, arg, delimiter = 0)
         self.assertRaises(TypeError, ctor, arg, delimiter = 'XX')
@@ -59,7 +60,8 @@
         self.assertRaises((TypeError, AttributeError), setattr, obj.dialect,
                           'delimiter', ':')
         self.assertRaises(AttributeError, delattr, obj.dialect, 'quoting')
-        self.assertRaises(AttributeError, setattr, obj.dialect,
+        # PyPy gets a TypeError instead of an AttributeError
+        self.assertRaises((AttributeError, TypeError), setattr, obj.dialect,
                           'quoting', None)
 
     def test_reader_attrs(self):
@@ -133,7 +135,8 @@
             os.unlink(name)
 
     def test_write_arg_valid(self):
-        self.assertRaises(csv.Error, self._write_test, None, '')
+        # PyPy gets a TypeError instead of a csv.Error for "not a sequence"
+        self.assertRaises((csv.Error, TypeError), self._write_test, None, '')
         self._write_test((), '')
         self._write_test([None], '""')
         self.assertRaises(csv.Error, self._write_test,
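
The pattern used in these tweaks -- passing a tuple of exception classes to assertRaises so the test accepts whichever type the interpreter happens to raise -- in a minimal standalone sketch (hypothetical test, not part of the patch):

    import unittest

    class TupleOfExceptionsDemo(unittest.TestCase):
        def test_either_exception_type_passes(self):
            def fail(flavour):
                # stand-in for ctor(None): one implementation raises
                # TypeError, the other AttributeError
                if flavour == 'cpython':
                    raise TypeError('expected a string')
                raise AttributeError('no such attribute')
            self.assertRaises((TypeError, AttributeError), fail, 'cpython')
            self.assertRaises((TypeError, AttributeError), fail, 'pypy')

    if __name__ == '__main__':
        unittest.main()
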
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -183,7 +183,7 @@
     RegrTest('test_cpickle.py', core=True),
     RegrTest('test_cprofile.py'), 
     RegrTest('test_crypt.py', usemodules='crypt', skip=skip_win32),
-    RegrTest('test_csv.py'),
+    RegrTest('test_csv.py', usemodules='_csv'),
 
     RegrTest('test_curses.py', skip="unsupported extension module"),
     RegrTest('test_datetime.py'),
diff --git a/lib_pypy/_csv.py b/lib_pypy/_csv.py
--- a/lib_pypy/_csv.py
+++ b/lib_pypy/_csv.py
@@ -363,9 +363,7 @@
                             (self.dialect.delimiter, self.dialect.quotechar))
 
         elif self.state == self.EAT_CRNL:
-            if c in '\r\n':
-                pass
-            else:
+            if c not in '\r\n':
                 raise Error("new-line character seen in unquoted field - "
                             "do you need to open the file "
                             "in universal-newline mode?")
diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py
--- a/lib_pypy/_ctypes/pointer.py
+++ b/lib_pypy/_ctypes/pointer.py
@@ -81,7 +81,9 @@
         addr = self._buffer[0]
         if addr == 0:
             raise ValueError("NULL pointer access")
-        return self._type_.from_address(addr)
+        instance = self._type_.from_address(addr)
+        instance.__dict__['_base'] = self
+        return instance
 
     def setcontents(self, value):
         if not isinstance(value, self._type_):
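
The added '_base' reference keeps the pointer (and hence the memory it owns) alive for as long as the dereferenced object is in use; a small plain-ctypes illustration of the behaviour this is meant to match (not part of the patch):

    import ctypes

    class Point(ctypes.Structure):
        _fields_ = [('x', ctypes.c_int), ('y', ctypes.c_int)]

    p = ctypes.pointer(Point(1, 2))
    contents = p.contents      # shares memory owned by the pointer chain
    del p                      # 'contents' must keep that memory alive
    assert (contents.x, contents.y) == (1, 2)
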
diff --git a/lib_pypy/numpypy/core/_methods.py b/lib_pypy/numpypy/core/_methods.py
--- a/lib_pypy/numpypy/core/_methods.py
+++ b/lib_pypy/numpypy/core/_methods.py
@@ -1,62 +1,77 @@
 # Array methods which are called by the both the C-code for the method
 # and the Python code for the NumPy-namespace function
 
+#from numpy.core import multiarray as mu
+#from numpy.core import umath as um
 import _numpypy as mu
 um = mu
-#from numpypy.core import umath as um
-from numpypy.core.numeric import asanyarray
+from numpy.core.numeric import asanyarray
 
-def _amax(a, axis=None, out=None, skipna=False, keepdims=False):
+def _amax(a, axis=None, out=None, keepdims=False):
     return um.maximum.reduce(a, axis=axis,
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
 
-def _amin(a, axis=None, out=None, skipna=False, keepdims=False):
+def _amin(a, axis=None, out=None, keepdims=False):
     return um.minimum.reduce(a, axis=axis,
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
 
-def _sum(a, axis=None, dtype=None, out=None, skipna=False, keepdims=False):
+def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
     return um.add.reduce(a, axis=axis, dtype=dtype,
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
 
-def _prod(a, axis=None, dtype=None, out=None, skipna=False, keepdims=False):
+def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
     return um.multiply.reduce(a, axis=axis, dtype=dtype,
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
 
-def _mean(a, axis=None, dtype=None, out=None, skipna=False, keepdims=False):
+def _any(a, axis=None, dtype=None, out=None, keepdims=False):
+    return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out,
+                                keepdims=keepdims)
+
+def _all(a, axis=None, dtype=None, out=None, keepdims=False):
+    return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out,
+                                 keepdims=keepdims)
+
+def _count_reduce_items(arr, axis):
+    if axis is None:
+        axis = tuple(xrange(arr.ndim))
+    if not isinstance(axis, tuple):
+        axis = (axis,)
+    items = 1
+    for ax in axis:
+        items *= arr.shape[ax]
+    return items
+
+def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
     arr = asanyarray(a)
 
     # Upgrade bool, unsigned int, and int to float64
     if dtype is None and arr.dtype.kind in ['b','u','i']:
         ret = um.add.reduce(arr, axis=axis, dtype='f8',
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
     else:
         ret = um.add.reduce(arr, axis=axis, dtype=dtype,
-                            out=out, skipna=skipna, keepdims=keepdims)
-    rcount = mu.count_reduce_items(arr, axis=axis,
-                            skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
+    rcount = _count_reduce_items(arr, axis)
     if isinstance(ret, mu.ndarray):
         ret = um.true_divide(ret, rcount,
-                        casting='unsafe', subok=False)
+                        out=ret, casting='unsafe', subok=False)
     else:
         ret = ret / float(rcount)
     return ret
 
 def _var(a, axis=None, dtype=None, out=None, ddof=0,
-                            skipna=False, keepdims=False):
+                            keepdims=False):
     arr = asanyarray(a)
 
     # First compute the mean, saving 'rcount' for reuse later
     if dtype is None and arr.dtype.kind in ['b','u','i']:
-        arrmean = um.add.reduce(arr, axis=axis, dtype='f8',
-                            skipna=skipna, keepdims=True)
+        arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True)
     else:
-        arrmean = um.add.reduce(arr, axis=axis, dtype=dtype,
-                            skipna=skipna, keepdims=True)
-    rcount = mu.count_reduce_items(arr, axis=axis,
-                            skipna=skipna, keepdims=True)
+        arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True)
+    rcount = _count_reduce_items(arr, axis)
     if isinstance(arrmean, mu.ndarray):
         arrmean = um.true_divide(arrmean, rcount,
-                                  casting='unsafe', subok=False)
+                            out=arrmean, casting='unsafe', subok=False)
     else:
         arrmean = arrmean / float(rcount)
 
@@ -65,13 +80,12 @@
 
     # (arr - arrmean) ** 2
     if arr.dtype.kind == 'c':
-        x = um.multiply(x, um.conjugate(x)).real
+        x = um.multiply(x, um.conjugate(x), out=x).real
     else:
-        x = um.multiply(x, x)
+        x = um.multiply(x, x, out=x)
 
     # add.reduce((arr - arrmean) ** 2, axis)
-    ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out,
-                        skipna=skipna, keepdims=keepdims)
+    ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
 
     # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof)
     if not keepdims and isinstance(rcount, mu.ndarray):
@@ -79,19 +93,18 @@
     rcount -= ddof
     if isinstance(ret, mu.ndarray):
         ret = um.true_divide(ret, rcount,
-                        casting='unsafe', subok=False)
+                        out=ret, casting='unsafe', subok=False)
     else:
         ret = ret / float(rcount)
 
     return ret
 
-def _std(a, axis=None, dtype=None, out=None, ddof=0,
-                            skipna=False, keepdims=False):
+def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
     ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
-                                skipna=skipna, keepdims=keepdims)
+               keepdims=keepdims)
 
     if isinstance(ret, mu.ndarray):
-        ret = um.sqrt(ret)
+        ret = um.sqrt(ret, out=ret)
     else:
         ret = um.sqrt(ret)
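
The new _count_reduce_items helper replaces the removed mu.count_reduce_items: the number of elements collapsed by a reduction is the product of the sizes of the reduced axes, and _mean then divides the sum by that count. The same arithmetic as a plain-Python sketch:

    def count_reduce_items(shape, axis=None):
        if axis is None:
            axis = tuple(range(len(shape)))
        if not isinstance(axis, tuple):
            axis = (axis,)
        items = 1
        for ax in axis:
            items *= shape[ax]
        return items

    # For a 3x4 array: axis 0 collapses 3 items per result element,
    # axis 1 collapses 4, and reducing over all axes collapses 12.
    assert count_reduce_items((3, 4), 0) == 3
    assert count_reduce_items((3, 4), 1) == 4
    assert count_reduce_items((3, 4)) == 12
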
 
diff --git a/lib_pypy/numpypy/core/arrayprint.py b/lib_pypy/numpypy/core/arrayprint.py
--- a/lib_pypy/numpypy/core/arrayprint.py
+++ b/lib_pypy/numpypy/core/arrayprint.py
@@ -14,9 +14,9 @@
 
 import sys
 import _numpypy as _nt
-from _numpypy import maximum, minimum, absolute, not_equal, isinf, isnan, isna
+from _numpypy import maximum, minimum, absolute, not_equal, isnan, isinf
 #from _numpypy import format_longfloat, datetime_as_string, datetime_data
-from .fromnumeric import ravel
+from fromnumeric import ravel
 
 
 def product(x, y): return x*y
@@ -29,7 +29,6 @@
 _line_width = 75
 _nan_str = 'nan'
 _inf_str = 'inf'
-_na_str = 'NA'
 _formatter = None  # formatting function for array elements
 
 if sys.version_info[0] >= 3:
@@ -37,7 +36,7 @@
 
 def set_printoptions(precision=None, threshold=None, edgeitems=None,
                      linewidth=None, suppress=None,
-                     nanstr=None, infstr=None, nastr=None,
+                     nanstr=None, infstr=None,
                      formatter=None):
     """
     Set printing options.
@@ -65,8 +64,6 @@
         String representation of floating point not-a-number (default nan).
     infstr : str, optional
         String representation of floating point infinity (default inf).
-    nastr : str, optional
-        String representation of NA missing value (default NA).
     formatter : dict of callables, optional
         If not None, the keys should indicate the type(s) that the respective
         formatting function applies to.  Callables should return a string.
@@ -144,7 +141,7 @@
 
     global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \
            _line_width, _float_output_suppress_small, _nan_str, _inf_str, \
-           _na_str, _formatter
+           _formatter
     if linewidth is not None:
         _line_width = linewidth
     if threshold is not None:
@@ -159,8 +156,6 @@
         _nan_str = nanstr
     if infstr is not None:
         _inf_str = infstr
-    if nastr is not None:
-        _na_str = nastr
     _formatter = formatter
 
 def get_printoptions():
@@ -195,7 +190,6 @@
              suppress=_float_output_suppress_small,
              nanstr=_nan_str,
              infstr=_inf_str,
-             nastr=_na_str,
              formatter=_formatter)
     return d
 
@@ -219,19 +213,14 @@
     return b
 
 def _boolFormatter(x):
-    if isna(x):
-        return str(x).replace('NA', _na_str, 1)
-    elif x:
+    if x:
         return ' True'
     else:
         return 'False'
 
 
 def repr_format(x):
-    if isna(x):
-        return str(x).replace('NA', _na_str, 1)
-    else:
-        return repr(x)
+    return repr(x)
 
 def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
                   prefix="", formatter=None):
@@ -262,8 +251,8 @@
                   #'complexfloat' : ComplexFormat(data, precision,
                   #                               suppress_small),
                   #'longcomplexfloat' : LongComplexFormat(precision),
-                  #'datetime' : DatetimeFormat(data),
-                  #'timedelta' : TimedeltaFormat(data),
+                  'datetime' : DatetimeFormat(data),
+                  'timedelta' : TimedeltaFormat(data),
                   'numpystr' : repr_format,
                   'str' : str}
 
@@ -309,11 +298,11 @@
             #    format_function = formatdict['longfloat']
             #else:
             format_function = formatdict['float']
-        elif issubclass(dtypeobj, _nt.complexfloating):
-            if issubclass(dtypeobj, _nt.clongfloat):
-                format_function = formatdict['longcomplexfloat']
-            else:
-                format_function = formatdict['complexfloat']
+        #elif issubclass(dtypeobj, _nt.complexfloating):
+        #    if issubclass(dtypeobj, _nt.clongfloat):
+        #        format_function = formatdict['longcomplexfloat']
+        #    else:
+        #        format_function = formatdict['complexfloat']
         elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
             format_function = formatdict['numpystr']
         elif issubclass(dtypeobj, _nt.datetime64):
@@ -437,20 +426,17 @@
 
     if a.shape == ():
         x = a.item()
-        if isna(x):
-            lst = str(x).replace('NA', _na_str, 1)
-        else:
-            try:
-                lst = a._format(x)
-                msg = "The `_format` attribute is deprecated in Numpy " \
-                      "2.0 and will be removed in 2.1. Use the " \
-                      "`formatter` kw instead."
-                import warnings
-                warnings.warn(msg, DeprecationWarning)
-            except AttributeError:
-                if isinstance(x, tuple):
-                    x = _convert_arrays(x)
-                lst = style(x)
+        try:
+            lst = a._format(x)
+            msg = "The `_format` attribute is deprecated in Numpy " \
+                  "2.0 and will be removed in 2.1. Use the " \
+                  "`formatter` kw instead."
+            import warnings
+            warnings.warn(msg, DeprecationWarning)
+        except AttributeError:
+            if isinstance(x, tuple):
+                x = _convert_arrays(x)
+            lst = style(x)
     elif reduce(product, a.shape) == 0:
         # treat as a null array if any of shape elements == 0
         lst = "[]"
@@ -542,38 +528,33 @@
         self.exp_format = False
         self.large_exponent = False
         self.max_str_len = 0
-        #try:
-        self.fillFormat(data)
-        #except (TypeError, NotImplementedError):
+        try:
+            self.fillFormat(data)
+        except (TypeError, NotImplementedError):
             # if reduce(data) fails, this instance will not be called, just
             # instantiated in formatdict.
-            #pass
+            pass
 
     def fillFormat(self, data):
         import numeric as _nc
-        # XXX pypy unimplemented
-        #errstate = _nc.seterr(all='ignore')
+        errstate = _nc.seterr(all='ignore')
         try:
-            special = isnan(data) | isinf(data) | isna(data)
-            special[isna(data)] = False
+            special = isnan(data) | isinf(data)
             valid = not_equal(data, 0) & ~special
-            valid[isna(data)] = False
             non_zero = absolute(data.compress(valid))
             if len(non_zero) == 0:
                 max_val = 0.
                 min_val = 0.
             else:
-                max_val = maximum.reduce(non_zero, skipna=True)
-                min_val = minimum.reduce(non_zero, skipna=True)
+                max_val = maximum.reduce(non_zero)
+                min_val = minimum.reduce(non_zero)
                 if max_val >= 1.e8:
                     self.exp_format = True
                 if not self.suppress_small and (min_val < 0.0001
                                            or max_val/min_val > 1000.):
                     self.exp_format = True
         finally:
-            pass
-            # XXX pypy unimplemented
-            #_nc.seterr(**errstate)
+            _nc.seterr(**errstate)
 
         if self.exp_format:
             self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
@@ -594,11 +575,10 @@
                 precision = 0
             precision = min(self.precision, precision)
             self.max_str_len = len(str(int(max_val))) + precision + 2
-            if special.any():
+            if _nc.any(special):
                 self.max_str_len = max(self.max_str_len,
                                        len(_nan_str),
-                                       len(_inf_str)+1,
-                                       len(_na_str))
+                                       len(_inf_str)+1)
             if self.sign:
                 format = '%#+'
             else:
@@ -610,11 +590,9 @@
 
     def __call__(self, x, strip_zeros=True):
         import numeric as _nc
-        #err = _nc.seterr(invalid='ignore')
+        err = _nc.seterr(invalid='ignore')
         try:
-            if isna(x):
-                return self.special_fmt % (str(x).replace('NA', _na_str, 1),)
-            elif isnan(x):
+            if isnan(x):
                 if self.sign:
                     return self.special_fmt % ('+' + _nan_str,)
                 else:
@@ -628,8 +606,7 @@
                 else:
                     return self.special_fmt % ('-' + _inf_str,)
         finally:
-            pass
-            #_nc.seterr(**err)
+            _nc.seterr(**err)
 
         s = self.format % x
         if self.large_exponent:
@@ -658,10 +635,10 @@
 class IntegerFormat(object):
     def __init__(self, data):
         try:
-            max_str_len = max(len(str(maximum.reduce(data, skipna=True))),
-                              len(str(minimum.reduce(data, skipna=True))))
+            max_str_len = max(len(str(maximum.reduce(data))),
+                              len(str(minimum.reduce(data))))
             self.format = '%' + str(max_str_len) + 'd'
-        except TypeError, NotImplementedError:
+        except (TypeError, NotImplementedError):
             # if reduce(data) fails, this instance will not be called, just
             # instantiated in formatdict.
             pass
@@ -670,9 +647,7 @@
             pass
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        elif _MININT < x < _MAXINT:
+        if _MININT < x < _MAXINT:
             return self.format % x
         else:
             return "%s" % x
@@ -685,9 +660,7 @@
         self.sign = sign
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        elif isnan(x):
+        if isnan(x):
             if self.sign:
                 return '+' + _nan_str
             else:
@@ -715,12 +688,9 @@
         self.imag_format = LongFloatFormat(precision, sign=True)
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        else:
-            r = self.real_format(x.real)
-            i = self.imag_format(x.imag)
-            return r + i + 'j'
+        r = self.real_format(x.real)
+        i = self.imag_format(x.imag)
+        return r + i + 'j'
 
 
 class ComplexFormat(object):
@@ -730,17 +700,14 @@
                                        sign=True)
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
+        r = self.real_format(x.real, strip_zeros=False)
+        i = self.imag_format(x.imag, strip_zeros=False)
+        if not self.imag_format.exp_format:
+            z = i.rstrip('0')
+            i = z + 'j' + ' '*(len(i)-len(z))
         else:
-            r = self.real_format(x.real, strip_zeros=False)
-            i = self.imag_format(x.imag, strip_zeros=False)
-            if not self.imag_format.exp_format:
-                z = i.rstrip('0')
-                i = z + 'j' + ' '*(len(i)-len(z))
-            else:
-                i = i + 'j'
-            return r + i
+            i = i + 'j'
+        return r + i
 
 class DatetimeFormat(object):
     def __init__(self, x, unit=None,
@@ -765,13 +732,10 @@
         self.casting = casting
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        else:
-            return "'%s'" % datetime_as_string(x,
-                                        unit=self.unit,
-                                        timezone=self.timezone,
-                                        casting=self.casting)
+        return "'%s'" % datetime_as_string(x,
+                                    unit=self.unit,
+                                    timezone=self.timezone,
+                                    casting=self.casting)
 
 class TimedeltaFormat(object):
     def __init__(self, data):
@@ -782,8 +746,5 @@
             self.format = '%' + str(max_str_len) + 'd'
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        else:
-            return self.format % x.astype('i8')
+        return self.format % x.astype('i8')
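
For reference, the printing options kept here (with the NA/'nastr' support removed) follow NumPy's public set_printoptions; a usage sketch with NumPy proper:

    import numpy as np

    np.set_printoptions(precision=3, suppress=True, nanstr='nan', infstr='inf')
    print(np.array([1 / 3.0, 1e-9, float('nan'), float('inf')]))
    # prints something like: [ 0.333  0.     nan    inf]
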
 
diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py
--- a/lib_pypy/numpypy/core/numeric.py
+++ b/lib_pypy/numpypy/core/numeric.py
@@ -1,6 +1,7 @@
 
 from _numpypy import array, ndarray, int_, float_, bool_ #, complex_# , longlong
 from _numpypy import concatenate
+from .fromnumeric import any
 import math
 import sys
 import _numpypy as multiarray # ARGH
@@ -8,7 +9,11 @@
 
 newaxis = None
 
-def asanyarray(a, dtype=None, order=None, maskna=None, ownmaskna=False):
+# XXX this file to be reviewed
+def seterr(**args):
+    return args
+
+def asanyarray(a, dtype=None, order=None):
     """
     Convert the input to an ndarray, but pass ndarray subclasses through.
 
@@ -23,13 +28,6 @@
     order : {'C', 'F'}, optional
         Whether to use row-major ('C') or column-major ('F') memory
         representation.  Defaults to 'C'.
-   maskna : bool or None, optional
-        If this is set to True, it forces the array to have an NA mask.
-        If this is set to False, it forces the array to not have an NA
-        mask.
-    ownmaskna : bool, optional
-        If this is set to True, forces the array to have a mask which
-        it owns.
 
     Returns
     -------
@@ -65,8 +63,7 @@
     True
 
     """
-    return array(a, dtype, copy=False, order=order, subok=True,
-                                maskna=maskna, ownmaskna=ownmaskna)
+    return array(a, dtype, copy=False, order=order, subok=True)
 
 def base_repr(number, base=2, padding=0):
     """
@@ -347,7 +344,7 @@
         return False
     return bool((a1 == a2).all())
 
-def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False):
+def asarray(a, dtype=None, order=None):
     """
     Convert the input to an array.
 
@@ -362,13 +359,6 @@
     order : {'C', 'F'}, optional
         Whether to use row-major ('C') or column-major ('F' for FORTRAN)
         memory representation.  Defaults to 'C'.
-   maskna : bool or None, optional
-        If this is set to True, it forces the array to have an NA mask.
-        If this is set to False, it forces the array to not have an NA
-        mask.
-    ownmaskna : bool, optional
-        If this is set to True, forces the array to have a mask which
-        it owns.
 
     Returns
     -------
@@ -422,8 +412,7 @@
     True
 
     """
-    return array(a, dtype, copy=False, order=order,
-                            maskna=maskna, ownmaskna=ownmaskna)
+    return array(a, dtype, copy=False, order=order)
 
 set_string_function(array_str, 0)
 set_string_function(array_repr, 1)
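
The asarray/asanyarray pair above keeps NumPy's usual contract (minus the removed maskna/ownmaskna arguments): asarray normalizes to a base ndarray, while asanyarray lets ndarray subclasses through. With NumPy proper:

    import numpy as np

    m = np.matrix([[1, 2], [3, 4]])             # an ndarray subclass
    assert type(np.asarray(m)) is np.ndarray    # subclass stripped
    assert type(np.asanyarray(m)) is np.matrix  # subclass passed through
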
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -34,7 +34,7 @@
      "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array",
      "_bisect", "binascii", "_multiprocessing", '_warnings',
      "_collections", "_multibytecodec", "micronumpy", "_ffi",
-     "_continuation", "_cffi_backend"]
+     "_continuation", "_cffi_backend", "_csv"]
 ))
 
 translation_modules = default_modules.copy()
diff --git a/pypy/doc/arm.rst b/pypy/doc/arm.rst
--- a/pypy/doc/arm.rst
+++ b/pypy/doc/arm.rst
@@ -23,7 +23,7 @@
 
 The tools required to cross translate from a Linux based host to an ARM based Linux target are:
 
-- A checkout of PyPy's arm-backend-2 branch.
+- A checkout of PyPy (default branch).
 - The GCC ARM cross compiler (on Ubuntu it is the ``gcc-arm-linux-gnueabi package``) but other toolchains should also work.
 - Scratchbox 2, a cross-compilation engine (``scratchbox2`` Ubuntu package).
 - A 32-bit PyPy or Python.
@@ -147,4 +147,4 @@
       return 0
 
   def target(*args):
-      return main, None
\ No newline at end of file
+      return main, None
diff --git a/pypy/doc/config/objspace.usemodules._csv.txt b/pypy/doc/config/objspace.usemodules._csv.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/config/objspace.usemodules._csv.txt
@@ -0,0 +1,2 @@
+Implementation in RPython for the core of the 'csv' module
+
diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst
--- a/pypy/doc/jit/pyjitpl5.rst
+++ b/pypy/doc/jit/pyjitpl5.rst
@@ -149,7 +149,7 @@
 
 A *virtual* value is an array, struct, or RPython level instance that is created
 during the loop and does not escape from it via calls or longevity past the
-loop.  Since it is only used by the JIT, it be "optimized out"; the value
+loop.  Since it is only used by the JIT, it can be "optimized out"; the value
 doesn't have to be allocated at all and its fields can be stored as first class
 values instead of deferencing them in memory.  Virtuals allow temporary objects
 in the interpreter to be unwrapped.  For example, a W_IntObject in the PyPy can
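
A toy illustration of the "virtual" idea described above (hypothetical Box class, not PyPy code): the wrapper objects below are created and consumed inside the loop without escaping, so a tracing JIT can skip allocating them and keep the raw integer in a register instead.

    class Box(object):                  # stand-in for e.g. W_IntObject
        def __init__(self, value):
            self.value = value
        def add(self, other):
            return Box(self.value + other.value)

    def loop(n):
        total = Box(0)
        for i in range(n):
            total = total.add(Box(i))   # Box(i) never escapes the iteration
        return total.value
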
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -21,7 +21,7 @@
 -------------------------
 
 PyPy's implementation of the Python ``long`` type is slower than CPython's.
-Find out why and optimize them.
+Find out why and optimize them.  **UPDATE:** this was done (thanks stian).
 
 Make bytearray type fast
 ------------------------
@@ -103,13 +103,35 @@
 
 * A concurrent garbage collector (a lot of work)
 
-STM, a.k.a. "remove the GIL"
-----------------------------
+STM (Software Transactional Memory)
+-----------------------------------
 
-Removing the GIL --- or more precisely, a GIL-less thread-less solution ---
-is `now work in progress.`__  Contributions welcome.
+This is work in progress.  Besides the main development path, whose goal is
+to make a (relatively fast) version of pypy which includes STM, there are
+independent topics that can already be experimented with on the existing,
+JIT-less pypy-stm version:
+  
+* What kind of conflicts do we get in real use cases?  And, sometimes,
+  which data structures would be more appropriate?  For example, a dict
+  implemented as a hash table will suffer "stm collisions" in all threads
+  whenever one thread writes anything to it; but there could be other
+  implementations.  Maybe alternate strategies can be implemented at the
+  level of the Python interpreter (see list/dict strategies,
+  ``pypy/objspace/std/{list,dict}object.py``).
 
-.. __: http://pypy.org/tmdonate.html
+* More generally, there is the idea that we would need some kind of
+  "debugger"-like tool to "debug" things that are not bugs, but stm
+  conflicts.  How would this tool look like to the end Python
+  programmers?  Like a profiler?  Or like a debugger with breakpoints
+  on aborted transactions?  It would probably be all app-level, with
+  a few hooks e.g. for transaction conflicts.
+
+* Find good ways to have libraries using internally threads and atomics,
+  but not exposing threads to the user.  Right now there is a rough draft
+  in ``lib_pypy/transaction.py``, but much better is possible.  For example
+  we could probably have an iterator-like concept that allows each loop
+  iteration to run in parallel.
+
 
 Introduce new benchmarks
 ------------------------
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -16,6 +16,11 @@
 
 .. branch: iterator-in-rpython
 .. branch: numpypy_count_nonzero
+.. branch: numpy-refactor
+Remove numpy lazy evaluation and simplify everything
+.. branch: numpy-reintroduce-jit-drivers
+.. branch: numpy-fancy-indexing
+Support for array[array-of-ints] in numpy
 .. branch: even-more-jit-hooks
 Implement better JIT hooks
 .. branch: virtual-arguments
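
The numpy-fancy-indexing branch note refers to indexing an array with an array of integers; in NumPy terms:

    import numpy as np

    a = np.array([10, 20, 30, 40])
    idx = np.array([3, 0, 0, 2])
    print(a[idx])                       # [40 10 10 30]
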
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -65,24 +65,44 @@
         self.marked = False
         self.have_return = False
 
-    def _post_order(self, blocks):
-        if self.marked:
-            return
-        self.marked = True
-        if self.next_block is not None:
-            self.next_block._post_order(blocks)
-        for instr in self.instructions:
-            if instr.has_jump:
-                instr.jump[0]._post_order(blocks)
-        blocks.append(self)
-        self.marked = True
+    def _post_order_see(self, stack, nextblock):
+        if nextblock.marked == 0:
+            nextblock.marked = 1
+            stack.append(nextblock)
 
     def post_order(self):
-        """Return this block and its children in post order."""
-        blocks = []
-        self._post_order(blocks)
-        blocks.reverse()
-        return blocks
+        """Return this block and its children in post order.
+        This means that the graph of blocks is first cleaned up to
+        ignore back-edges, thus turning it into a DAG.  Then the DAG
+        is linearized.  For example:
+
+                   A --> B -\           =>     [A, D, B, C]
+                     \-> D ---> C
+        """
+        resultblocks = []
+        stack = [self]
+        self.marked = 1
+        while stack:
+            current = stack[-1]
+            if current.marked == 1:
+                current.marked = 2
+                if current.next_block is not None:
+                    self._post_order_see(stack, current.next_block)
+            else:
+                i = current.marked - 2
+                assert i >= 0
+                while i < len(current.instructions):
+                    instr = current.instructions[i]
+                    i += 1
+                    if instr.has_jump:
+                        current.marked = i + 2
+                        self._post_order_see(stack, instr.jump[0])
+                        break
+                else:
+                    resultblocks.append(current)
+                    stack.pop()
+        resultblocks.reverse()
+        return resultblocks
 
     def code_size(self):
         """Return the encoded size of all the instructions in this block."""
@@ -353,20 +373,26 @@
     def _stacksize(self, blocks):
         """Compute co_stacksize."""
         for block in blocks:
-            block.marked = False
-            block.initial_depth = -1000
-        return self._recursive_stack_depth_walk(blocks[0], 0, 0)
+            block.initial_depth = 0
+        # Assumes that it is sufficient to walk the blocks in 'post-order'.
+        # This means we ignore all back-edges, but apart from that, we only
+        # look into a block when all the previous blocks have been done.
+        self._max_depth = 0
+        for block in blocks:
+            self._do_stack_depth_walk(block)
+        return self._max_depth
 
-    def _recursive_stack_depth_walk(self, block, depth, max_depth):
-        if block.marked or block.initial_depth >= depth:
-            return max_depth
-        block.marked = True
-        block.initial_depth = depth
+    def _next_stack_depth_walk(self, nextblock, depth):
+        if depth > nextblock.initial_depth:
+            nextblock.initial_depth = depth
+
+    def _do_stack_depth_walk(self, block):
+        depth = block.initial_depth
         done = False
         for instr in block.instructions:
             depth += _opcode_stack_effect(instr.opcode, instr.arg)
-            if depth >= max_depth:
-                max_depth = depth
+            if depth >= self._max_depth:
+                self._max_depth = depth
             if instr.has_jump:
                 target_depth = depth
                 jump_op = instr.opcode
@@ -376,20 +402,15 @@
                       jump_op == ops.SETUP_EXCEPT or
                       jump_op == ops.SETUP_WITH):
                     target_depth += 3
-                    if target_depth > max_depth:
-                        max_depth = target_depth
-                max_depth = self._recursive_stack_depth_walk(instr.jump[0],
-                                                             target_depth,
-                                                             max_depth)
+                    if target_depth > self._max_depth:
+                        self._max_depth = target_depth
+                self._next_stack_depth_walk(instr.jump[0], target_depth)
                 if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD:
                     # Nothing more can occur.
                     done = True
                     break
         if block.next_block and not done:
-            max_depth = self._recursive_stack_depth_walk(block.next_block,
-                                                         depth, max_depth)
-        block.marked = False
-        return max_depth
+            max_depth = self._next_stack_depth_walk(block.next_block, depth)
 
     def _build_lnotab(self, blocks):
         """Build the line number table for tracebacks and tracing."""
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -474,7 +474,7 @@
                 if f_type == F_BLOCK_LOOP:
                     self.emit_jump(ops.CONTINUE_LOOP, block, True)
                     break
-                if self.frame_blocks[i][0] == F_BLOCK_FINALLY_END:
+                if f_type == F_BLOCK_FINALLY_END:
                     self.error("'continue' not supported inside 'finally' " \
                                    "clause",
                                cont)
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -778,6 +778,10 @@
             raise AssertionError("attribute not removed")"""
         yield self.st, test, "X.__name__", "X"
 
+    def test_lots_of_loops(self):
+        source = "for x in y: pass\n" * 1000
+        compile_with_astcompiler(source, 'exec', self.space)
+
 
 class AppTestCompiler:
 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -208,11 +208,11 @@
     def int_w(self, space):
         raise OperationError(space.w_TypeError,
                              typed_unwrap_error_msg(space, "integer", self))
-    
+
     def uint_w(self, space):
         raise OperationError(space.w_TypeError,
                              typed_unwrap_error_msg(space, "integer", self))
-    
+
     def bigint_w(self, space):
         raise OperationError(space.w_TypeError,
                              typed_unwrap_error_msg(space, "integer", self))
@@ -292,8 +292,6 @@
     """Base class for the interpreter-level implementations of object spaces.
     http://pypy.readthedocs.org/en/latest/objspace.html"""
 
-    full_exceptions = True  # full support for exceptions (normalization & more)
-
     def __init__(self, config=None):
         "NOT_RPYTHON: Basic initialization of objects."
         self.fromcache = InternalSpaceCache(self).getorbuild
@@ -590,10 +588,6 @@
                 w_exc = self.getitem(w_dic, w_name)
                 exc_types_w[name] = w_exc
                 setattr(self, "w_" + excname, w_exc)
-        # Make a prebuilt recursion error
-        w_msg = self.wrap("maximum recursion depth exceeded")
-        self.prebuilt_recursion_error = OperationError(self.w_RuntimeError,
-                                                       w_msg)
         return exc_types_w
 
     def install_mixedmodule(self, mixedname, installed_builtin_modules):
@@ -838,7 +832,8 @@
         return isinstance(obj, RequiredClass)
 
     def unpackiterable(self, w_iterable, expected_length=-1):
-        """Unpack an iterable object into a real (interpreter-level) list.
+        """Unpack an iterable into a real (interpreter-level) list.
+
         Raise an OperationError(w_ValueError) if the length is wrong."""
         w_iterator = self.iter(w_iterable)
         if expected_length == -1:
@@ -858,12 +853,10 @@
     def iteriterable(self, w_iterable):
         return W_InterpIterable(self, w_iterable)
 
-    @jit.dont_look_inside
     def _unpackiterable_unknown_length(self, w_iterator, w_iterable):
-        # Unpack a variable-size list of unknown length.
-        # The JIT does not look inside this function because it
-        # contains a loop (made explicit with the decorator above).
-        #
+        """Unpack an iterable of unknown length into an interp-level
+        list.
+        """
         # If we can guess the expected length we can preallocate.
         try:
             lgt_estimate = self.len_w(w_iterable)
@@ -1133,13 +1126,9 @@
     def exception_is_valid_obj_as_class_w(self, w_obj):
         if not self.isinstance_w(w_obj, self.w_type):
             return False
-        if not self.full_exceptions:
-            return True
         return self.is_true(self.issubtype(w_obj, self.w_BaseException))
 
     def exception_is_valid_class_w(self, w_cls):
-        if not self.full_exceptions:
-            return True
         return self.is_true(self.issubtype(w_cls, self.w_BaseException))
 
     def exception_getclass(self, w_obj):
@@ -1390,7 +1379,7 @@
         if not self.is_true(self.isinstance(w_obj, self.w_str)):
             raise OperationError(self.w_TypeError,
                                  self.wrap('argument must be a string'))
-        return self.str_w(w_obj)            
+        return self.str_w(w_obj)
 
     def unicode_w(self, w_obj):
         return w_obj.unicode_w(self)
@@ -1711,7 +1700,7 @@
     'ValueError',
     'ZeroDivisionError',
     ]
-    
+
 if sys.platform.startswith("win"):
     ObjSpace.ExceptionTable += ['WindowsError']
 
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -21,9 +21,7 @@
     _application_traceback = None
 
     def __init__(self, w_type, w_value, tb=None):
-        if not we_are_translated() and w_type is None:
-            from pypy.tool.error import FlowingError
-            raise FlowingError(w_value)
+        assert w_type is not None
         self.setup(w_type)
         self._w_value = w_value
         self._application_traceback = tb
@@ -47,11 +45,6 @@
 
     def async(self, space):
         "Check if this is an exception that should better not be caught."
-        if not space.full_exceptions:
-            # flow objspace does not support such exceptions and more
-            # importantly, raises KeyboardInterrupt if you try to access
-            # space.w_KeyboardInterrupt
-            return False
         return (self.match(space, space.w_SystemExit) or
                 self.match(space, space.w_KeyboardInterrupt))
 
@@ -168,9 +161,7 @@
         # Or 'Class' can also be an old-style class and 'inst' an old-style
         # instance of it.
         #
-        # Note that 'space.full_exceptions' is set to False by the flow
-        # object space; in this case we must assume that we are in a
-        # non-advanced case, and ignore the advanced cases.  Old-style
+        # The flow object space only deals with non-advanced case. Old-style
         # classes and instances *are* advanced.
         #
         #  input (w_type, w_value)... becomes...                advanced case?
@@ -185,9 +176,8 @@
         #
         w_type  = self.w_type
         w_value = self.get_w_value(space)
-        if space.full_exceptions:
-            while space.is_true(space.isinstance(w_type, space.w_tuple)):
-                w_type = space.getitem(w_type, space.wrap(0))
+        while space.is_true(space.isinstance(w_type, space.w_tuple)):
+            w_type = space.getitem(w_type, space.wrap(0))
 
         if space.exception_is_valid_obj_as_class_w(w_type):
             # this is for all cases of the form (Class, something)
@@ -201,8 +191,7 @@
                     # raise Type, Instance: let etype be the exact type of value
                     w_type = w_valuetype
                 else:
-                    if space.full_exceptions and space.is_true(
-                        space.isinstance(w_value, space.w_tuple)):
+                    if space.is_true(space.isinstance(w_value, space.w_tuple)):
                         # raise Type, tuple: assume the tuple contains the
                         #                    constructor args
                         w_value = space.call(w_type, w_value)
@@ -327,9 +316,7 @@
                 self.xstrings = strings
                 for i, attr in entries:
                     setattr(self, attr, args[i])
-                if not we_are_translated() and w_type is None:
-                    from pypy.tool.error import FlowingError
-                    raise FlowingError(self._compute_value())
+                assert w_type is not None
             def _compute_value(self):
                 lst = [None] * (len(formats) + len(formats) + 1)
                 for i, attr in entries:
@@ -393,7 +380,7 @@
         return OperationError(exc, w_error)
 
 def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError',
-                  w_exception_class=None): 
+                  w_exception_class=None):
     assert isinstance(e, OSError)
 
     if _WINDOWS and isinstance(e, WindowsError):
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -158,17 +158,13 @@
             self._trace(frame, 'exception', None, operationerr)
         #operationerr.print_detailed_traceback(self.space)
 
-    def _convert_exc(self, operr):
-        # Only for the flow object space
-        return operr
-
     def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!!
         """Implements sys.exc_info().
         Return an OperationError instance or None."""
         frame = self.gettopframe_nohidden()
         while frame:
             if frame.last_exception is not None:
-                return self._convert_exc(frame.last_exception)
+                return frame.last_exception
             frame = self.getnextframe_nohidden(frame)
         return None
 
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -651,7 +651,8 @@
             raise OperationError(space.w_MemoryError, space.w_None)
         except rstackovf.StackOverflow, e:
             rstackovf.check_stack_overflow()
-            raise space.prebuilt_recursion_error
+            raise OperationError(space.w_RuntimeError,
+                                space.wrap("maximum recursion depth exceeded"))
         except RuntimeError:   # not on top of py.py
             raise OperationError(space.w_RuntimeError, space.w_None)
 
@@ -943,14 +944,6 @@
         def appcaller(space, *args_w):
             if not isinstance(space, ObjSpace):
                 raise TypeError("first argument must be a space instance.")
-            # redirect if the space handles this specially
-            # XXX can this be factored a bit less flow space dependently?
-            if hasattr(space, 'specialcases'):
-                sc = space.specialcases
-                if ApplevelClass in sc:
-                    ret_w = sc[ApplevelClass](space, self, name, args_w)
-                    if ret_w is not None: # it was RPython
-                        return ret_w
             # the last argument can be an Arguments
             w_func = self.wget(space, name)
             if not args_w:
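
Instead of re-raising a single prebuilt OperationError, a fresh RuntimeError("maximum recursion depth exceeded") is now constructed each time the RPython stack overflows. At application level the behaviour looks like this (sketch):

    def recurse():
        return recurse()

    try:
        recurse()
    except RuntimeError as e:           # RecursionError on Python 3
        print(e)                        # maximum recursion depth exceeded
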
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -136,10 +136,6 @@
             next_instr = self.dispatch_bytecode(co_code, next_instr, ec)
         except OperationError, operr:
             next_instr = self.handle_operation_error(ec, operr)
-        except Reraise:
-            operr = self.last_exception
-            next_instr = self.handle_operation_error(ec, operr,
-                                                     attach_tb=False)
         except RaiseWithExplicitTraceback, e:
             next_instr = self.handle_operation_error(ec, e.operr,
                                                      attach_tb=False)
@@ -150,9 +146,11 @@
             next_instr = self.handle_asynchronous_error(ec,
                 self.space.w_MemoryError)
         except rstackovf.StackOverflow, e:
+            # Note that this case catches AttributeError!
             rstackovf.check_stack_overflow()
-            w_err = self.space.prebuilt_recursion_error
-            next_instr = self.handle_operation_error(ec, w_err)
+            next_instr = self.handle_asynchronous_error(ec,
+                self.space.w_RuntimeError,
+                self.space.wrap("maximum recursion depth exceeded"))
         return next_instr
 
     def handle_asynchronous_error(self, ec, w_type, w_value=None):
@@ -275,7 +273,7 @@
                         next_instr = block.handle(self, unroller)
 
             elif opcode == self.opcodedesc.JUMP_ABSOLUTE.index:
-                next_instr = self.jump_absolute(oparg, next_instr, ec)
+                next_instr = self.jump_absolute(oparg, ec)
 
             elif we_are_translated():
                 for opdesc in unrolling_all_opcode_descs:
@@ -609,7 +607,7 @@
             ec = self.space.getexecutioncontext()
             while frame:
                 if frame.last_exception is not None:
-                    operror = ec._convert_exc(frame.last_exception)
+                    operror = frame.last_exception
                     break
                 frame = frame.f_backref()
             else:
@@ -617,7 +615,7 @@
                     space.wrap("raise: no active exception to re-raise"))
             # re-raise, no new traceback obj will be attached
             self.last_exception = operror
-            raise Reraise
+            raise RaiseWithExplicitTraceback(operror)
 
         w_value = w_traceback = space.w_None
         if nbargs >= 3:
@@ -628,7 +626,7 @@
             w_type = self.popvalue()
         operror = OperationError(w_type, w_value)
         operror.normalize_exception(space)
-        if not space.full_exceptions or space.is_w(w_traceback, space.w_None):
+        if space.is_w(w_traceback, space.w_None):
             # common case
             raise operror
         else:
@@ -929,7 +927,8 @@
     def YIELD_VALUE(self, oparg, next_instr):
         raise Yield
 
-    def jump_absolute(self, jumpto, next_instr, ec):
+    def jump_absolute(self, jumpto, ec):
+        # this function is overridden by pypy.module.pypyjit.interp_jit
         check_nonneg(jumpto)
         return jumpto
 
@@ -1015,26 +1014,15 @@
 
     def WITH_CLEANUP(self, oparg, next_instr):
         # see comment in END_FINALLY for stack state
-        # This opcode changed a lot between CPython versions
-        if (self.pycode.magic >= 0xa0df2ef
-            # Implementation since 2.7a0: 62191 (introduce SETUP_WITH)
-            or self.pycode.magic >= 0xa0df2d1):
-            # implementation since 2.6a1: 62161 (WITH_CLEANUP optimization)
-            w_unroller = self.popvalue()
-            w_exitfunc = self.popvalue()
-            self.pushvalue(w_unroller)
-        elif self.pycode.magic >= 0xa0df28c:
-            # Implementation since 2.5a0: 62092 (changed WITH_CLEANUP opcode)
-            w_exitfunc = self.popvalue()
-            w_unroller = self.peekvalue(0)
-        else:
-            raise NotImplementedError("WITH_CLEANUP for CPython <= 2.4")
-
+        w_unroller = self.popvalue()
+        w_exitfunc = self.popvalue()
+        self.pushvalue(w_unroller)
         unroller = self.space.interpclass_w(w_unroller)
         is_app_exc = (unroller is not None and
                       isinstance(unroller, SApplicationException))
         if is_app_exc:
             operr = unroller.operr
+            self.last_exception = operr
             w_traceback = self.space.wrap(operr.get_traceback())
             w_suppress = self.call_contextmanager_exit_function(
                 w_exitfunc,
@@ -1234,10 +1222,8 @@
 class Yield(ExitFrame):
     """Raised when exiting a frame via a 'yield' statement."""
 
-class Reraise(Exception):
-    """Raised at interp-level by a bare 'raise' statement."""
 class RaiseWithExplicitTraceback(Exception):
-    """Raised at interp-level by a 3-arguments 'raise' statement."""
+    """Raised at interp-level by a 0- or 3-arguments 'raise' statement."""
     def __init__(self, operr):
         self.operr = operr
 
@@ -1264,10 +1250,6 @@
     def nomoreblocks(self):
         raise BytecodeCorruption("misplaced bytecode - should not return")
 
-    # NB. for the flow object space, the state_(un)pack_variables methods
-    # give a way to "pickle" and "unpickle" the SuspendedUnroller by
-    # enumerating the Variables it contains.
-
 class SReturnValue(SuspendedUnroller):
     """Signals a 'return' statement.
     Argument is the wrapped object to return."""
@@ -1278,12 +1260,6 @@
     def nomoreblocks(self):
         return self.w_returnvalue
 
-    def state_unpack_variables(self, space):
-        return [self.w_returnvalue]
-    def state_pack_variables(space, w_returnvalue):
-        return SReturnValue(w_returnvalue)
-    state_pack_variables = staticmethod(state_pack_variables)
-
 class SApplicationException(SuspendedUnroller):
     """Signals an application-level exception
     (i.e. an OperationException)."""
@@ -1294,23 +1270,10 @@
     def nomoreblocks(self):
         raise RaiseWithExplicitTraceback(self.operr)
 
-    def state_unpack_variables(self, space):
-        return [self.operr.w_type, self.operr.get_w_value(space)]
-    def state_pack_variables(space, w_type, w_value):
-        return SApplicationException(OperationError(w_type, w_value))
-    state_pack_variables = staticmethod(state_pack_variables)
-
 class SBreakLoop(SuspendedUnroller):
     """Signals a 'break' statement."""
     _immutable_ = True
     kind = 0x04
-
-    def state_unpack_variables(self, space):
-        return []
-    def state_pack_variables(space):
-        return SBreakLoop.singleton
-    state_pack_variables = staticmethod(state_pack_variables)
-
 SBreakLoop.singleton = SBreakLoop()
 
 class SContinueLoop(SuspendedUnroller):
@@ -1321,12 +1284,6 @@
     def __init__(self, jump_to):
         self.jump_to = jump_to
 
-    def state_unpack_variables(self, space):
-        return [space.wrap(self.jump_to)]
-    def state_pack_variables(space, w_jump_to):
-        return SContinueLoop(space.int_w(w_jump_to))
-    state_pack_variables = staticmethod(state_pack_variables)
-
 
 class FrameBlock(object):
     """Abstract base class for frame blocks from the blockstack,
@@ -1381,7 +1338,9 @@
             # and jump to the beginning of the loop, stored in the
             # exception's argument
             frame.append_block(self)
-            return r_uint(unroller.jump_to)
+            jumpto = unroller.jump_to
+            ec = frame.space.getexecutioncontext()
+            return r_uint(frame.jump_absolute(jumpto, ec))
         else:
             # jump to the end of the loop
             self.cleanupstack(frame)
@@ -1401,8 +1360,7 @@
         self.cleanupstack(frame)
         assert isinstance(unroller, SApplicationException)
         operationerr = unroller.operr
-        if frame.space.full_exceptions:
-            operationerr.normalize_exception(frame.space)
+        operationerr.normalize_exception(frame.space)
         # the stack setup is slightly different than in CPython:
         # instead of the traceback, we store the unroller object,
         # wrapped.
@@ -1434,8 +1392,7 @@
     _immutable_ = True
 
     def handle(self, frame, unroller):
-        if (frame.space.full_exceptions and
-            isinstance(unroller, SApplicationException)):
+        if isinstance(unroller, SApplicationException):
             unroller.operr.normalize_exception(frame.space)
         return FinallyBlock.handle(self, frame, unroller)
 
diff --git a/pypy/interpreter/test/test_syntax.py b/pypy/interpreter/test/test_syntax.py
--- a/pypy/interpreter/test/test_syntax.py
+++ b/pypy/interpreter/test/test_syntax.py
@@ -1,3 +1,4 @@
+from __future__ import with_statement
 import py
 from pypy.conftest import gettestobjspace
 
@@ -567,6 +568,24 @@
         import types
         assert isinstance(acontextfact.exit_params[2], types.TracebackType)
 
+    def test_with_reraise_exception(self):
+        class Context:
+            def __enter__(self):
+                self.calls = []
+            def __exit__(self, exc_type, exc_value, exc_tb):
+                self.calls.append('exit')
+                raise
+
+        c = Context()
+        try:
+            with c:
+                1 / 0
+        except ZeroDivisionError:
+            pass
+        else:
+            raise AssertionError('Should have reraised initial exception')
+        assert c.calls == ['exit']
+
     def test_with_break(self):
 
         s = """
diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py
--- a/pypy/jit/backend/model.py
+++ b/pypy/jit/backend/model.py
@@ -131,13 +131,13 @@
     def get_latest_value_float(self, index):
         """Returns the value for the index'th argument to the
         last executed operation (from 'fail_args' if it was a guard,
-        or from 'args' if it was a FINISH).  Returns a float."""
+        or from 'args' if it was a FINISH).  Returns a FLOATSTORAGE."""
         raise NotImplementedError
 
     def get_latest_value_ref(self, index):
         """Returns the value for the index'th argument to the
         last executed operation (from 'fail_args' if it was a guard,
-        or from 'args' if it was a FINISH).  Returns a ptr or an obj."""
+        or from 'args' if it was a FINISH).  Returns a GCREF."""
         raise NotImplementedError
 
     def get_latest_value_count(self):
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
--- a/pypy/jit/backend/test/runner_test.py
+++ b/pypy/jit/backend/test/runner_test.py
@@ -994,6 +994,7 @@
                              ('p', lltype.Ptr(TP)))
         a_box, A = self.alloc_array_of(ITEM, 15)
         s_box, S = self.alloc_instance(TP)
+        vsdescr = self.cpu.interiorfielddescrof(A, 'vs')
         kdescr = self.cpu.interiorfielddescrof(A, 'k')
         pdescr = self.cpu.interiorfielddescrof(A, 'p')
         self.execute_operation(rop.SETINTERIORFIELD_GC, [a_box, BoxInt(3),
@@ -1045,6 +1046,13 @@
         r = self.execute_operation(rop.GETINTERIORFIELD_GC, [a_box, BoxInt(3)],
                                    'ref', descr=pdescr)
         assert r.getref_base() == s_box.getref_base()
+        #
+        # test a corner case that used to fail on x86
+        i4 = BoxInt(4)
+        self.execute_operation(rop.SETINTERIORFIELD_GC, [a_box, i4, i4],
+                               'void', descr=vsdescr)
+        r = self.cpu.bh_getinteriorfield_gc_i(a_box.getref_base(), 4, vsdescr)
+        assert r == 4
 
     def test_string_basic(self):
         s_box = self.alloc_string("hello\xfe")
diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py
--- a/pypy/jit/backend/test/test_random.py
+++ b/pypy/jit/backend/test/test_random.py
@@ -465,6 +465,16 @@
 
 # ____________________________________________________________
 
+def do_assert(condition, error_message):
+    if condition:
+        return
+    seed = pytest.config.option.randomseed
+    message = "%s\nPython: %s\nRandom seed: %r" % (
+        error_message,
+        sys.executable,
+        seed)
+    raise AssertionError(message)
+
 def Random():
     import random
     seed = pytest.config.option.randomseed
@@ -544,6 +554,7 @@
         self.startvars = startvars
         self.prebuilt_ptr_consts = []
         self.r = r
+        self.subloops = []
         self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay)
 
     def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay):
@@ -668,13 +679,15 @@
 
         arguments = [box.value for box in self.loop.inputargs]
         fail = cpu.execute_token(self.runjitcelltoken(), *arguments)
-        assert fail is self.should_fail_by.getdescr()
+        do_assert(fail is self.should_fail_by.getdescr(),
+                  "Got %r, expected %r" % (fail,
+                                           self.should_fail_by.getdescr()))
         for i, v in enumerate(self.get_fail_args()):
             if isinstance(v, (BoxFloat, ConstFloat)):
                 value = cpu.get_latest_value_float(i)
             else:
                 value = cpu.get_latest_value_int(i)
-            assert value == self.expected[v], (
+            do_assert(value == self.expected[v],
                 "Got %r, expected %r for value #%d" % (value,
                                                        self.expected[v],
                                                        i)
@@ -683,9 +696,11 @@
         if (self.guard_op is not None and
             self.guard_op.is_guard_exception()):
             if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION:
-                assert exc
+                do_assert(exc,
+                          "grab_exc_value() should not be %r" % (exc,))
         else:
-            assert not exc
+            do_assert(not exc,
+                      "unexpected grab_exc_value(): %r" % (exc,))
 
     def build_bridge(self):
         def exc_handling(guard_op):
@@ -710,6 +725,7 @@
             return False
         # generate the branch: a sequence of operations that ends in a FINISH
         subloop = DummyLoop([])
+        self.subloops.append(subloop)   # keep around for debugging
         if guard_op.is_guard_exception():
             subloop.operations.append(exc_handling(guard_op))
         bridge_builder = self.builder.fork(self.builder.cpu, subloop,
@@ -746,9 +762,6 @@
             args = [x.clonebox() for x in subset]
             rl = RandomLoop(self.builder.cpu, self.builder.fork,
                                      r, args)
-            dump(rl.loop)
-            self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations,
-                                  rl.loop._jitcelltoken)
             # done
             self.should_fail_by = rl.should_fail_by
             self.expected = rl.expected
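
(The do_assert() helper added above exists so that a failure in this randomized test always reports enough information to replay it. A standalone sketch of the same idea follows; report_assert is an illustrative name and 4242 is a made-up example seed, not taken from this diff.)

import random
import sys

def report_assert(condition, error_message, seed):
    # like do_assert(): embed the interpreter and the random seed in the
    # failure message so the exact run can be reproduced later
    if not condition:
        raise AssertionError("%s\nPython: %s\nRandom seed: %r"
                             % (error_message, sys.executable, seed))

seed = 4242                     # made-up example seed
r = random.Random(seed)         # re-seeding with the reported value replays the run
report_assert(r.randrange(10) < 10, "randrange() out of range", seed)
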
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
--- a/pypy/jit/backend/x86/regalloc.py
+++ b/pypy/jit/backend/x86/regalloc.py
@@ -1005,14 +1005,18 @@
         # If 'index_loc' is not an immediate, then we need a 'temp_loc' that
         # is a register whose value will be destroyed.  It's fine to destroy
         # the same register as 'index_loc', but not the other ones.
-        self.rm.possibly_free_var(box_index)
         if not isinstance(index_loc, ImmedLoc):
+            # ...that is, except in a corner case where 'index_loc' would be
+            # in the same register as 'value_loc'...
+            if index_loc is not value_loc:
+                self.rm.possibly_free_var(box_index)
             tempvar = TempBox()
             temp_loc = self.rm.force_allocate_reg(tempvar, [box_base,
                                                             box_value])
             self.rm.possibly_free_var(tempvar)
         else:
             temp_loc = None
+        self.rm.possibly_free_var(box_index)
         self.rm.possibly_free_var(box_base)
         self.possibly_free_var(box_value)
         self.PerformDiscard(op, [base_loc, ofs, itemsize, fieldsize,
diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py
--- a/pypy/jit/backend/x86/rx86.py
+++ b/pypy/jit/backend/x86/rx86.py
@@ -576,7 +576,7 @@
     J_il8 = insn(immediate(1, 'o'), '\x70', immediate(2, 'b'))
     J_il = insn('\x0F', immediate(1,'o'), '\x80', relative(2))
 
-    SET_ir = insn(rex_w, '\x0F', immediate(1,'o'),'\x90', byte_register(2), '\xC0')
+    SET_ir = insn(rex_fw, '\x0F', immediate(1,'o'),'\x90', byte_register(2), '\xC0')
 
     # The 64-bit version of this, CQO, is defined in X86_64_CodeBuilder
     CDQ = insn(rex_nw, '\x99')
diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py
--- a/pypy/jit/metainterp/compile.py
+++ b/pypy/jit/metainterp/compile.py
@@ -106,7 +106,8 @@
 
 def compile_loop(metainterp, greenkey, start,
                  inputargs, jumpargs,
-                 resume_at_jump_descr, full_preamble_needed=True):
+                 resume_at_jump_descr, full_preamble_needed=True,
+                 try_disabling_unroll=False):
     """Try to compile a new procedure by closing the current history back
     to the first operation.
     """
@@ -116,6 +117,13 @@
     jitdriver_sd = metainterp.jitdriver_sd
     history = metainterp.history
 
+    enable_opts = jitdriver_sd.warmstate.enable_opts
+    if try_disabling_unroll:
+        if 'unroll' not in enable_opts:
+            return None
+        enable_opts = enable_opts.copy()
+        del enable_opts['unroll']
+
     jitcell_token = make_jitcell_token(jitdriver_sd)
     part = create_empty_loop(metainterp)
     part.inputargs = inputargs[:]
@@ -126,7 +134,7 @@
                       [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)]
 
     try:
-        optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts)
+        optimize_trace(metainterp_sd, part, enable_opts)
     except InvalidLoop:
         return None
     target_token = part.operations[0].getdescr()
@@ -153,7 +161,7 @@
         jumpargs = part.operations[-1].getarglist()
 
         try:
-            optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts)
+            optimize_trace(metainterp_sd, part, enable_opts)
         except InvalidLoop:
             return None
             
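
(The try_disabling_unroll path above boils down to: if unrolling was not enabled there is nothing to retry, otherwise optimize with a copy of enable_opts that has 'unroll' removed. A small standalone sketch of that selection logic; pick_opts is an illustrative name, not PyPy's.)

def pick_opts(enable_opts, try_disabling_unroll):
    # return the optimization dict to use, or None if there is nothing to retry
    if not try_disabling_unroll:
        return enable_opts
    if 'unroll' not in enable_opts:
        return None
    enable_opts = enable_opts.copy()     # don't mutate the warmstate's own dict
    del enable_opts['unroll']
    return enable_opts

assert pick_opts({'unroll': None, 'intbounds': None}, True) == {'intbounds': None}
assert pick_opts({'intbounds': None}, True) is None
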
diff --git a/pypy/jit/metainterp/optimizeopt/util.py b/pypy/jit/metainterp/optimizeopt/util.py
--- a/pypy/jit/metainterp/optimizeopt/util.py
+++ b/pypy/jit/metainterp/optimizeopt/util.py
@@ -2,9 +2,10 @@
 from pypy.rlib.objectmodel import r_dict, compute_identity_hash
 from pypy.rlib.rarithmetic import intmask
 from pypy.rlib.unroll import unrolling_iterable
-from pypy.jit.metainterp import resoperation, history
+from pypy.jit.metainterp import resoperation
 from pypy.rlib.debug import make_sure_not_resized
 from pypy.jit.metainterp.resoperation import rop
+from pypy.rlib.objectmodel import we_are_translated
 
 # ____________________________________________________________
 # Misc. utilities
@@ -28,13 +29,20 @@
 def make_dispatcher_method(Class, name_prefix, op_prefix=None, default=None):
     ops = _findall(Class, name_prefix, op_prefix)
     def dispatch(self, op, *args):
-        opnum = op.getopnum()
-        for value, cls, func in ops:
-            if opnum == value:
-                assert isinstance(op, cls)
+        if we_are_translated():
+            opnum = op.getopnum()
+            for value, cls, func in ops:
+                if opnum == value:
+                    assert isinstance(op, cls)
+                    return func(self, op, *args)
+            if default:
+                return default(self, op, *args)
+        else:
+            func = getattr(Class, name_prefix + op.getopname().upper(), None)
+            if func is not None:
                 return func(self, op, *args)
-        if default:
-            return default(self, op, *args)
+            if default:
+                return default(self, op, *args)
     dispatch.func_name = "dispatch_" + name_prefix
     return dispatch
 
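
(The untranslated branch added above resolves the handler by operation name with getattr() instead of looping over the unrolled opnum table. A minimal sketch of that lookup in isolation; FakeOp and MyOptimizer are illustrative stand-ins, not PyPy classes.)

class FakeOp(object):
    def __init__(self, name):
        self._name = name
    def getopname(self):
        return self._name

class MyOptimizer(object):
    def optimize_INT_ADD(self, op):
        return 'handled %s' % op.getopname()
    def optimize_default(self, op):
        return 'default for %s' % op.getopname()
    def dispatch(self, op):
        # look the handler up by name, falling back to the default
        func = getattr(self.__class__,
                       'optimize_' + op.getopname().upper(), None)
        if func is not None:
            return func(self, op)
        return self.optimize_default(op)

opt = MyOptimizer()
assert opt.dispatch(FakeOp('int_add')) == 'handled int_add'
assert opt.dispatch(FakeOp('float_mul')) == 'default for float_mul'
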
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -2039,8 +2039,9 @@
                     memmgr = self.staticdata.warmrunnerdesc.memory_manager
                     if memmgr:
                         if self.cancel_count > memmgr.max_unroll_loops:
-                            self.staticdata.log('cancelled too many times!')
-                            raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP)
+                            self.compile_loop_or_abort(original_boxes,
+                                                       live_arg_boxes,
+                                                       start, resumedescr)
                 self.staticdata.log('cancelled, tracing more...')
 
         # Otherwise, no loop found so far, so continue tracing.
@@ -2140,7 +2141,8 @@
                 return None
         return token
 
-    def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr):
+    def compile_loop(self, original_boxes, live_arg_boxes, start,
+                     resume_at_jump_descr, try_disabling_unroll=False):
         num_green_args = self.jitdriver_sd.num_green_args
         greenkey = original_boxes[:num_green_args]
         if not self.partial_trace:
@@ -2156,7 +2158,8 @@
             target_token = compile.compile_loop(self, greenkey, start,
                                                 original_boxes[num_green_args:],
                                                 live_arg_boxes[num_green_args:],
-                                                resume_at_jump_descr)
+                                                resume_at_jump_descr,
+                                     try_disabling_unroll=try_disabling_unroll)
             if target_token is not None:
                 assert isinstance(target_token, TargetToken)
                 self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey, target_token.targeting_jitcell_token)
@@ -2168,6 +2171,18 @@
             jitcell_token = target_token.targeting_jitcell_token
             self.raise_continue_running_normally(live_arg_boxes, jitcell_token)
 
+    def compile_loop_or_abort(self, original_boxes, live_arg_boxes,
+                              start, resume_at_jump_descr):
+        """Called after we aborted more than 'max_unroll_loops' times.
+        As a last attempt, try to compile the loop with unrolling disabled.
+        """
+        if not self.partial_trace:
+            self.compile_loop(original_boxes, live_arg_boxes, start,
+                              resume_at_jump_descr, try_disabling_unroll=True)
+        #
+        self.staticdata.log('cancelled too many times!')
+        raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP)
+
     def compile_trace(self, live_arg_boxes, resume_at_jump_descr):
         num_green_args = self.jitdriver_sd.num_green_args
         greenkey = live_arg_boxes[:num_green_args]
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -2028,6 +2028,7 @@
                 y -= 1
             return res
         def g(x, y):
+            set_param(myjitdriver, 'max_unroll_loops', 5)
             a1 = f(A(x), y)
             a2 = f(A(x), y)
             b1 = f(B(x), y)
@@ -2734,6 +2735,35 @@
         finally:
             optimizeopt.optimize_trace = old_optimize_trace
 
+    def test_max_unroll_loops_retry_without_unroll(self):
+        from pypy.jit.metainterp.optimize import InvalidLoop
+        from pypy.jit.metainterp import optimizeopt
+        myjitdriver = JitDriver(greens = [], reds = ['n', 'i'])
+        #
+        def f(n, limit):
+            set_param(myjitdriver, 'threshold', 5)
+            set_param(myjitdriver, 'max_unroll_loops', limit)
+            i = 0
+            while i < n:
+                myjitdriver.jit_merge_point(n=n, i=i)
+                print i
+                i += 1
+            return i
+        #
+        seen = []
+        def my_optimize_trace(metainterp_sd, loop, enable_opts, *args, **kwds):
+            seen.append('unroll' in enable_opts)
+            raise InvalidLoop
+        old_optimize_trace = optimizeopt.optimize_trace
+        optimizeopt.optimize_trace = my_optimize_trace
+        try:
+            res = self.meta_interp(f, [23, 4])
+            assert res == 23
+            assert False in seen
+            assert True in seen
+        finally:
+            optimizeopt.optimize_trace = old_optimize_trace
+
     def test_retrace_limit_with_extra_guards(self):
         myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a',
                                                      'node'])
diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py
--- a/pypy/jit/metainterp/test/test_send.py
+++ b/pypy/jit/metainterp/test/test_send.py
@@ -1,5 +1,5 @@
 import py
-from pypy.rlib.jit import JitDriver, promote, elidable
+from pypy.rlib.jit import JitDriver, promote, elidable, set_param
 from pypy.jit.codewriter.policy import StopAtXPolicy
 from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
 
@@ -181,6 +181,7 @@
             def getvalue(self):
                 return self.y
         def f(x, y):
+            set_param(myjitdriver, 'max_unroll_loops', 5)
             if x & 1:
                 w = W1(x)
             else:
@@ -226,6 +227,7 @@
         w2 = W2(20)
 
         def f(x, y):
+            set_param(myjitdriver, 'max_unroll_loops', 5)
             if x & 1:
                 w = w1
             else:
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -1,11 +1,13 @@
 from pypy.interpreter.mixedmodule import MixedModule
+from pypy.rlib import rdynload
+
 
 class Module(MixedModule):
 
     appleveldefs = {
         }
     interpleveldefs = {
-        '__version__': 'space.wrap("0.3")',
+        '__version__': 'space.wrap("0.4")',
 
         'nonstandard_integer_types': 'misc.nonstandard_integer_types',
 
@@ -27,7 +29,8 @@
         'alignof': 'func.alignof',
         'sizeof': 'func.sizeof',
         'typeof': 'func.typeof',
-        'offsetof': 'func.offsetof',
+        'typeoffsetof': 'func.typeoffsetof',
+        'rawaddressof': 'func.rawaddressof',
         '_getfields': 'func._getfields',
         'getcname': 'func.getcname',
         '_get_types': 'func._get_types',
@@ -41,3 +44,12 @@
         'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")',
         'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name
         }
+
+for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL",
+              "RTLD_NODELETE", "RTLD_NOLOAD", "RTLD_DEEPBIND"]:
+    if getattr(rdynload.cConfig, _name) is not None:
+        Module.interpleveldefs[_name] = 'space.wrap(%d)' % (
+            getattr(rdynload.cConfig, _name),)
+
+for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL"]:
+    Module.interpleveldefs.setdefault(_name, 'space.wrap(0)')
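
(The RTLD_* export loop above can be mirrored with the standard library; a rough sketch follows, assuming a Unix-like platform, with the flags read from the os module as on Python 3.3+ -- on other Pythons the getattr() default simply kicks in.)

import os

RTLD_FLAGS = {}
for _name in ("RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL",
              "RTLD_NODELETE", "RTLD_NOLOAD", "RTLD_DEEPBIND"):
    _value = getattr(os, _name, None)    # None if this platform lacks the flag
    if _value is not None:
        RTLD_FLAGS[_name] = _value
for _name in ("RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL"):
    RTLD_FLAGS.setdefault(_name, 0)      # the four standard names always exist, even if 0
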
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -286,8 +286,8 @@
         for i, cf in enumerate(ctype.fields_list):
             if cf.is_bitfield():
                 raise OperationError(space.w_NotImplementedError,
-                    space.wrap("cannot pass as argument a struct "
-                               "with bit fields"))
+                    space.wrap("cannot pass as argument or return value "
+                               "a struct with bit fields"))
             ffi_subtype = self.fb_fill_type(cf.ctype, False)
             if elements:
                 elements[i] = ffi_subtype
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -134,14 +134,22 @@
                               "ctype '%s' is of unknown alignment",
                               self.name)
 
-    def offsetof(self, fieldname):
+    def typeoffsetof(self, fieldname):
         space = self.space
-        raise OperationError(space.w_TypeError,
-                             space.wrap("not a struct or union ctype"))
+        if fieldname is None:
+            msg = "expected a struct or union ctype"
+        else:
+            msg = "expected a struct or union ctype, or a pointer to one"
+        raise OperationError(space.w_TypeError, space.wrap(msg))
 
     def _getfields(self):
         return None
 
+    def rawaddressof(self, cdata, offset):
+        space = self.space
+        raise OperationError(space.w_TypeError,
+                             space.wrap("expected a pointer ctype"))
+
     def call(self, funcaddr, args_w):
         space = self.space
         raise operationerrfmt(space.w_TypeError,
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -52,19 +52,25 @@
         if (isinstance(ob, cdataobj.W_CData) and
                isinstance(ob.ctype, ctypeptr.W_CTypePtrOrArray)):
             value = rffi.cast(lltype.Signed, ob._cdata)
-            value = r_ulonglong(value)
+            value = self._cast_result(value)
         elif space.isinstance_w(w_ob, space.w_str):
             value = self.cast_str(w_ob)
-            value = r_ulonglong(value)
+            value = self._cast_result(value)
         elif space.isinstance_w(w_ob, space.w_unicode):
             value = self.cast_unicode(w_ob)
-            value = r_ulonglong(value)
+            value = self._cast_result(value)
         else:
-            value = misc.as_unsigned_long_long(space, w_ob, strict=False)
+            value = self._cast_generic(w_ob)
         w_cdata = cdataobj.W_CDataMem(space, self.size, self)
         w_cdata.write_raw_integer_data(value)
         return w_cdata
 
+    def _cast_result(self, intvalue):
+        return r_ulonglong(intvalue)
+
+    def _cast_generic(self, w_ob):
+        return misc.as_unsigned_long_long(self.space, w_ob, strict=False)
+
     def _overflow(self, w_ob):
         space = self.space
         s = space.str_w(space.str(w_ob))
@@ -163,13 +169,9 @@
             self.vrangemax = (r_ulonglong(1) << sh) - 1
 
     def int(self, cdata):
-        if self.value_fits_long:
-            # this case is to handle enums, but also serves as a slight
-            # performance improvement for some other primitive types
-            value = misc.read_raw_long_data(cdata, self.size)
-            return self.space.wrap(value)
-        else:
-            return self.convert_to_object(cdata)
+        # enums: really call convert_to_object() just below,
+        # and not the one overridden in W_CTypeEnum.
+        return W_CTypePrimitiveSigned.convert_to_object(self, cdata)
 
     def convert_to_object(self, cdata):
         if self.value_fits_long:
@@ -203,8 +205,11 @@
         W_CTypePrimitive.__init__(self, *args)
         self.value_fits_long = self.size < rffi.sizeof(lltype.Signed)
         if self.size < rffi.sizeof(lltype.SignedLongLong):
-            sh = self.size * 8
-            self.vrangemax = (r_ulonglong(1) << sh) - 1
+            self.vrangemax = self._compute_vrange_max()
+
+    def _compute_vrange_max(self):
+        sh = self.size * 8
+        return (r_ulonglong(1) << sh) - 1
 
     def int(self, cdata):
         return self.convert_to_object(cdata)
@@ -231,6 +236,23 @@
         return self
 
 
+class W_CTypePrimitiveBool(W_CTypePrimitiveUnsigned):
+    _attrs_ = []
+
+    def _compute_vrange_max(self):
+        return r_ulonglong(1)
+
+    def _cast_result(self, intvalue):
+        return r_ulonglong(intvalue != 0)
+
+    def _cast_generic(self, w_ob):
+        return misc.object_as_bool(self.space, w_ob)
+
+    def string(self, cdataobj, maxlen):
+        # bypass the method 'string' implemented in W_CTypePrimitive
+        return W_CType.string(self, cdataobj, maxlen)
+
+
 class W_CTypePrimitiveFloat(W_CTypePrimitive):
     _attrs_ = []
 
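
(The new W_CTypePrimitiveBool normalizes every cast to 0 or 1, since _cast_result() returns intvalue != 0. At the cffi level the intended behaviour looks roughly like the sketch below; it assumes a cffi front-end new enough to accept the '_Bool' type name and is not taken from this diff.)

import cffi                              # assumed to match this backend version
ffi = cffi.FFI()
assert int(ffi.cast("_Bool", 42)) == 1   # any non-zero value collapses to 1
assert int(ffi.cast("_Bool", 0)) == 0
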
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -70,7 +70,8 @@
             for i in range(len(lst_w)):
                 ctitem.convert_from_object(cdata, lst_w[i])
                 cdata = rffi.ptradd(cdata, ctitem.size)
-        elif isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveChar):
+        elif (self.ctitem.is_primitive_integer and
+              self.ctitem.size == rffi.sizeof(lltype.Char)):
             try:
                 s = space.str_w(w_ob)
             except OperationError, e:
@@ -274,18 +275,26 @@
             return True
         else:
             set_mustfree_flag(cdata, False)
-            try:
-                self.convert_from_object(cdata, w_ob)
-            except OperationError:
-                if (self.is_struct_ptr and isinstance(ob, cdataobj.W_CData)
-                    and ob.ctype is self.ctitem):
-                    # special case to make the life of verifier.py easier:
-                    # if the formal argument type is 'struct foo *' but
-                    # we pass a 'struct foo', then get a pointer to it
-                    rffi.cast(rffi.CCHARPP, cdata)[0] = ob._cdata
-                else:
-                    raise
+            self.convert_from_object(cdata, w_ob)
             return False
 
     def getcfield(self, attr):
         return self.ctitem.getcfield(attr)
+
+    def typeoffsetof(self, fieldname):
+        if fieldname is None:
+            return W_CTypePtrBase.typeoffsetof(self, fieldname)
+        else:
+            return self.ctitem.typeoffsetof(fieldname)
+
+    def rawaddressof(self, cdata, offset):
+        from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion
+        space = self.space
+        ctype2 = cdata.ctype
+        if (isinstance(ctype2, W_CTypeStructOrUnion) or
+            (isinstance(ctype2, W_CTypePtrOrArray) and ctype2.is_struct_ptr)):
+            ptrdata = rffi.ptradd(cdata._cdata, offset)
+            return cdataobj.W_CData(space, ptrdata, self)
+        else:
+            raise OperationError(space.w_TypeError,
+                     space.wrap("expected a 'cdata struct-or-union' object"))
diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py
--- a/pypy/module/_cffi_backend/ctypestruct.py
+++ b/pypy/module/_cffi_backend/ctypestruct.py
@@ -61,14 +61,19 @@
         keepalive_until_here(ob)
         return ob
 
-    def offsetof(self, fieldname):
+    def typeoffsetof(self, fieldname):
+        if fieldname is None:
+            return (self, 0)
         self.check_complete()
+        space = self.space
         try:
             cfield = self.fields_dict[fieldname]
         except KeyError:
-            space = self.space
             raise OperationError(space.w_KeyError, space.wrap(fieldname))
-        return cfield.offset
+        if cfield.bitshift >= 0:
+            raise OperationError(space.w_TypeError,
+                                 space.wrap("not supported for bitfields"))
+        return (cfield.ctype, cfield.offset)
 
     def _copy_from_same(self, cdata, w_ob):
         space = self.space
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -53,15 +53,19 @@

