[pypy-svn] r57382 - pypy/branch/2.5-features/lib-python/modified-2.5.1/test

bgola at codespeak.net bgola at codespeak.net
Mon Aug 18 01:37:19 CEST 2008


Author: bgola
Date: Mon Aug 18 01:37:16 2008
New Revision: 57382

Added:
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_deque.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_descr.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_descrtut.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_dict.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_enumerate.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_exceptions.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_file.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_format.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_funcattrs.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_generators.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_genexps.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_iter.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_itertools.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_marshal.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_mmap.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_module.py   (contents, props changed)
   pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_mutants.py   (contents, props changed)
Log:
more changes applied to tests (stdlib)

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_deque.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_deque.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,632 @@
+from collections import deque
+import unittest
+from test import test_support, seq_tests
+#from weakref import proxy
+import copy
+import cPickle as pickle
+from cStringIO import StringIO
+import random
+import os
+
+BIG = 10
+
+def fail():
+    raise SyntaxError
+    yield 1
+
+class BadCmp:
+    def __eq__(self, other):
+        raise RuntimeError
+
+class MutateCmp:
+    def __init__(self, deque, result):
+        self.deque = deque
+        self.result = result
+    def __eq__(self, other):
+        self.deque.clear()
+        return self.result
+
+class TestBasic(unittest.TestCase):
+
+    def test_basics(self):
+        d = deque(xrange(100))
+        d.__init__(xrange(100, 200))
+        for i in xrange(200, 400):
+            d.append(i)
+        for i in reversed(xrange(-200, 0)):
+            d.appendleft(i)
+        self.assertEqual(list(d), range(-200, 400))
+        self.assertEqual(len(d), 600)
+
+        left = [d.popleft() for i in xrange(250)]
+        self.assertEqual(left, range(-200, 50))
+        self.assertEqual(list(d), range(50, 400))
+
+        right = [d.pop() for i in xrange(250)]
+        right.reverse()
+        self.assertEqual(right, range(150, 400))
+        self.assertEqual(list(d), range(50, 150))
+
+    def test_comparisons(self):
+        d = deque('xabc'); d.popleft()
+        for e in [d, deque('abc'), deque('ab'), deque(), list(d)]:
+            self.assertEqual(d==e, type(d)==type(e) and list(d)==list(e))
+            self.assertEqual(d!=e, not(type(d)==type(e) and list(d)==list(e)))
+
+        args = map(deque, ('', 'a', 'b', 'ab', 'ba', 'abc', 'xba', 'xabc', 'cba'))
+        for x in args:
+            for y in args:
+                self.assertEqual(x == y, list(x) == list(y), (x,y))
+                self.assertEqual(x != y, list(x) != list(y), (x,y))
+                self.assertEqual(x <  y, list(x) <  list(y), (x,y))
+                self.assertEqual(x <= y, list(x) <= list(y), (x,y))
+                self.assertEqual(x >  y, list(x) >  list(y), (x,y))
+                self.assertEqual(x >= y, list(x) >= list(y), (x,y))
+                self.assertEqual(cmp(x,y), cmp(list(x),list(y)), (x,y))
+
+    def test_extend(self):
+        d = deque('a')
+        self.assertRaises(TypeError, d.extend, 1)
+        d.extend('bcd')
+        self.assertEqual(list(d), list('abcd'))
+
+    def test_extendleft(self):
+        d = deque('a')
+        self.assertRaises(TypeError, d.extendleft, 1)
+        d.extendleft('bcd')
+        self.assertEqual(list(d), list(reversed('abcd')))
+        d = deque()
+        d.extendleft(range(1000))
+        self.assertEqual(list(d), list(reversed(range(1000))))
+        self.assertRaises(SyntaxError, d.extendleft, fail())
+
+    def test_getitem(self):
+        n = 10
+        d = deque(xrange(n))
+        l = range(n)
+        for i in xrange(n):
+            d.popleft()
+            l.pop(0)
+            if random.random() < 0.5:
+                d.append(i)
+                l.append(i)
+            for j in xrange(1-len(l), len(l)):
+                assert d[j] == l[j]
+
+        d = deque('superman')
+        self.assertEqual(d[0], 's')
+        self.assertEqual(d[-1], 'n')
+        d = deque()
+        self.assertRaises(IndexError, d.__getitem__, 0)
+        self.assertRaises(IndexError, d.__getitem__, -1)
+
+    def test_setitem(self):
+        n = 10
+        d = deque(xrange(n))
+        for i in xrange(n):
+            d[i] = 10 * i
+        self.assertEqual(list(d), [10*i for i in xrange(n)])
+        l = list(d)
+        for i in xrange(1-n, 0, -1):
+            d[i] = 7*i
+            l[i] = 7*i
+        self.assertEqual(list(d), l)
+
+    def test_delitem(self):
+        n = 10         # O(n**2) test, don't make this too big
+        d = deque(xrange(n))
+        self.assertRaises(IndexError, d.__delitem__, -n-1)
+        self.assertRaises(IndexError, d.__delitem__, n)
+        for i in xrange(n):
+            self.assertEqual(len(d), n-i)
+            j = random.randrange(-len(d), len(d))
+            val = d[j]
+            self.assert_(val in d)
+            del d[j]
+            self.assert_(val not in d)
+        self.assertEqual(len(d), 0)
+
+    def test_rotate(self):
+        s = tuple('abcde')
+        n = len(s)
+
+        d = deque(s)
+        d.rotate(1)             # verify rot(1)
+        self.assertEqual(''.join(d), 'eabcd')
+
+        d = deque(s)
+        d.rotate(-1)            # verify rot(-1)
+        self.assertEqual(''.join(d), 'bcdea')
+        d.rotate()              # check default to 1
+        self.assertEqual(tuple(d), s)
+
+        for i in xrange(n*3):
+            d = deque(s)
+            e = deque(d)
+            d.rotate(i)         # check vs. rot(1) n times
+            for j in xrange(i):
+                e.rotate(1)
+            self.assertEqual(tuple(d), tuple(e))
+            d.rotate(-i)        # check that it works in reverse
+            self.assertEqual(tuple(d), s)
+            e.rotate(n-i)       # check that it wraps forward
+            self.assertEqual(tuple(e), s)
+
+        for i in xrange(n*3):
+            d = deque(s)
+            e = deque(d)
+            d.rotate(-i)
+            for j in xrange(i):
+                e.rotate(-1)    # check vs. rot(-1) n times
+            self.assertEqual(tuple(d), tuple(e))
+            d.rotate(i)         # check that it works in reverse
+            self.assertEqual(tuple(d), s)
+            e.rotate(i-n)       # check that it wraps back around
+            self.assertEqual(tuple(e), s)
+
+        d = deque(s)
+        e = deque(s)
+        e.rotate(BIG+17)        # verify on long series of rotates
+        dr = d.rotate
+        for i in xrange(BIG+17):
+            dr()
+        self.assertEqual(tuple(d), tuple(e))
+
+        self.assertRaises(TypeError, d.rotate, 'x')   # Wrong arg type
+        self.assertRaises(TypeError, d.rotate, 1, 10) # Too many args
+
+        d = deque()
+        d.rotate()              # rotate an empty deque
+        self.assertEqual(d, deque())
+
+    def test_len(self):
+        d = deque('ab')
+        self.assertEqual(len(d), 2)
+        d.popleft()
+        self.assertEqual(len(d), 1)
+        d.pop()
+        self.assertEqual(len(d), 0)
+        self.assertRaises(IndexError, d.pop)
+        self.assertEqual(len(d), 0)
+        d.append('c')
+        self.assertEqual(len(d), 1)
+        d.appendleft('d')
+        self.assertEqual(len(d), 2)
+        d.clear()
+        self.assertEqual(len(d), 0)
+
+    def test_underflow(self):
+        d = deque()
+        self.assertRaises(IndexError, d.pop)
+        self.assertRaises(IndexError, d.popleft)
+
+    def test_clear(self):
+        d = deque(xrange(100))
+        self.assertEqual(len(d), 100)
+        d.clear()
+        self.assertEqual(len(d), 0)
+        self.assertEqual(list(d), [])
+        d.clear()               # clear an empty deque
+        self.assertEqual(list(d), [])
+
+    def test_remove(self):
+        d = deque('abcdefghcij')
+        d.remove('c')
+        self.assertEqual(d, deque('abdefghcij'))
+        d.remove('c')
+        self.assertEqual(d, deque('abdefghij'))
+        self.assertRaises(ValueError, d.remove, 'c')
+        self.assertEqual(d, deque('abdefghij'))
+
+        # Handle comparison errors
+        d = deque(['a', 'b', BadCmp(), 'c'])
+        e = deque(d)
+        self.assertRaises(RuntimeError, d.remove, 'c')
+        for x, y in zip(d, e):
+            # verify that original order and values are retained.
+            self.assert_(x is y)
+
+        # Handle evil mutator
+        for match in (True, False):
+            d = deque(['ab'])
+            d.extend([MutateCmp(d, match), 'c'])
+            self.assertRaises(IndexError, d.remove, 'c')
+            self.assertEqual(d, deque())
+
+    def test_repr(self):
+        d = deque(xrange(200))
+        e = eval(repr(d))
+        self.assertEqual(list(d), list(e))
+        d.append(d)
+        self.assert_('...' in repr(d))
+
+    def test_print(self):
+        d = deque(xrange(200))
+        d.append(d)
+        try:
+            fo = open(test_support.TESTFN, "wb")
+            print >> fo, d,
+            fo.close()
+            fo = open(test_support.TESTFN, "rb")
+            self.assertEqual(fo.read(), repr(d))
+        finally:
+            fo.close()
+            os.remove(test_support.TESTFN)
+
+    def test_init(self):
+        self.assertRaises(TypeError, deque, 'abc', 2);
+        self.assertRaises(TypeError, deque, 1);
+
+    def test_hash(self):
+        self.assertRaises(TypeError, hash, deque('abc'))
+
+    def test_long_steadystate_queue_popleft(self):
+        for size in (0, 1, 2, 9):
+            d = deque(xrange(size))
+            append, pop = d.append, d.popleft
+            for i in xrange(size, BIG):
+                append(i)
+                x = pop()
+                if x != i - size:
+                    self.assertEqual(x, i-size)
+            self.assertEqual(list(d), range(BIG-size, BIG))
+
+    def test_long_steadystate_queue_popright(self):
+        for size in (0, 1, 2, 9):
+            d = deque(reversed(xrange(size)))
+            append, pop = d.appendleft, d.pop
+            for i in xrange(size, BIG):
+                append(i)
+                x = pop()
+                if x != i - size:
+                    self.assertEqual(x, i-size)
+            self.assertEqual(list(reversed(list(d))), range(BIG-size, BIG))
+
+    def test_big_queue_popleft(self):
+        pass
+        d = deque()
+        append, pop = d.append, d.popleft
+        for i in xrange(BIG):
+            append(i)
+        for i in xrange(BIG):
+            x = pop()
+            if x != i:
+                self.assertEqual(x, i)
+
+    def test_big_queue_popright(self):
+        d = deque()
+        append, pop = d.appendleft, d.pop
+        for i in xrange(BIG):
+            append(i)
+        for i in xrange(BIG):
+            x = pop()
+            if x != i:
+                self.assertEqual(x, i)
+
+    def test_big_stack_right(self):
+        d = deque()
+        append, pop = d.append, d.pop
+        for i in xrange(BIG):
+            append(i)
+        for i in reversed(xrange(BIG)):
+            x = pop()
+            if x != i:
+                self.assertEqual(x, i)
+        self.assertEqual(len(d), 0)
+
+    def test_big_stack_left(self):
+        d = deque()
+        append, pop = d.appendleft, d.popleft
+        for i in xrange(BIG):
+            append(i)
+        for i in reversed(xrange(BIG)):
+            x = pop()
+            if x != i:
+                self.assertEqual(x, i)
+        self.assertEqual(len(d), 0)
+
+    def test_roundtrip_iter_init(self):
+        d = deque(xrange(200))
+        e = deque(d)
+        self.assertNotEqual(id(d), id(e))
+        self.assertEqual(list(d), list(e))
+
+    def test_pickle(self):
+        d = deque(xrange(200))
+        for i in (0, 1, 2):
+            s = pickle.dumps(d, i)
+            e = pickle.loads(s)
+            self.assertNotEqual(id(d), id(e))
+            self.assertEqual(list(d), list(e))
+
+    def test_pickle_recursive(self):
+        d = deque('abc')
+        d.append(d)
+        for i in (0, 1, 2):
+            e = pickle.loads(pickle.dumps(d, i))
+            self.assertNotEqual(id(d), id(e))
+            self.assertEqual(id(e), id(e[-1]))
+
+    def test_deepcopy(self):
+        mut = [10]
+        d = deque([mut])
+        e = copy.deepcopy(d)
+        self.assertEqual(list(d), list(e))
+        mut[0] = 11
+        self.assertNotEqual(id(d), id(e))
+        self.assertNotEqual(list(d), list(e))
+
+    def test_copy(self):
+        mut = [10]
+        d = deque([mut])
+        e = copy.copy(d)
+        self.assertEqual(list(d), list(e))
+        mut[0] = 11
+        self.assertNotEqual(id(d), id(e))
+        self.assertEqual(list(d), list(e))
+
+    def test_reversed(self):
+        for s in ('abcd', xrange(200)):
+            self.assertEqual(list(reversed(deque(s))), list(reversed(s)))
+
+    def test_gc_doesnt_blowup(self):
+        import gc
+        # This used to assert-fail in deque_traverse() under a debug
+        # build, or run wild with a NULL pointer in a release build.
+        d = deque()
+        for i in xrange(100):
+            d.append(1)
+            gc.collect()
+
+class TestVariousIteratorArgs(unittest.TestCase):
+
+    def test_constructor(self):
+        for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
+            for g in (seq_tests.Sequence, seq_tests.IterFunc,
+                      seq_tests.IterGen, seq_tests.IterFuncStop,
+                      seq_tests.itermulti, seq_tests.iterfunc):
+                self.assertEqual(list(deque(g(s))), list(g(s)))
+            self.assertRaises(TypeError, deque, seq_tests.IterNextOnly(s))
+            self.assertRaises(TypeError, deque, seq_tests.IterNoNext(s))
+            self.assertRaises(ZeroDivisionError, deque, seq_tests.IterGenExc(s))
+
+    def test_iter_with_altered_data(self):
+        d = deque('abcdefg')
+        it = iter(d)
+        d.pop()
+        self.assertRaises(RuntimeError, it.next)
+
+    def test_runtime_error_on_empty_deque(self):
+        d = deque()
+        it = iter(d)
+        d.append(10)
+        self.assertRaises(RuntimeError, it.next)
+
+class Deque(deque):
+    pass
+
+class DequeWithBadIter(deque):
+    def __iter__(self):
+        raise TypeError
+
+class TestSubclass(unittest.TestCase):
+
+    def test_basics(self):
+        d = Deque(xrange(100))
+        d.__init__(xrange(100, 200))
+        for i in xrange(200, 400):
+            d.append(i)
+        for i in reversed(xrange(-200, 0)):
+            d.appendleft(i)
+        self.assertEqual(list(d), range(-200, 400))
+        self.assertEqual(len(d), 600)
+
+        left = [d.popleft() for i in xrange(250)]
+        self.assertEqual(left, range(-200, 50))
+        self.assertEqual(list(d), range(50, 400))
+
+        right = [d.pop() for i in xrange(250)]
+        right.reverse()
+        self.assertEqual(right, range(150, 400))
+        self.assertEqual(list(d), range(50, 150))
+
+        d.clear()
+        self.assertEqual(len(d), 0)
+
+    def test_copy_pickle(self):
+
+        d = Deque('abc')
+
+        e = d.__copy__()
+        self.assertEqual(type(d), type(e))
+        self.assertEqual(list(d), list(e))
+
+        e = Deque(d)
+        self.assertEqual(type(d), type(e))
+        self.assertEqual(list(d), list(e))
+
+        s = pickle.dumps(d)
+        e = pickle.loads(s)
+        self.assertNotEqual(id(d), id(e))
+        self.assertEqual(type(d), type(e))
+        self.assertEqual(list(d), list(e))
+
+    def test_pickle(self):
+        d = Deque('abc')
+        d.append(d)
+
+        e = pickle.loads(pickle.dumps(d))
+        self.assertNotEqual(id(d), id(e))
+        self.assertEqual(type(d), type(e))
+        dd = d.pop()
+        ee = e.pop()
+        self.assertEqual(id(e), id(ee))
+        self.assertEqual(d, e)
+
+        d.x = d
+        e = pickle.loads(pickle.dumps(d))
+        self.assertEqual(id(e), id(e.x))
+
+        d = DequeWithBadIter('abc')
+        self.assertRaises(TypeError, pickle.dumps, d)
+
+#    def test_weakref(self):
+#        d = deque('gallahad')
+#        p = proxy(d)
+#        self.assertEqual(str(p), str(d))
+#        d = None
+#        self.assertRaises(ReferenceError, str, p)
+
+    def test_strange_subclass(self):
+        class X(deque):
+            def __iter__(self):
+                return iter([])
+        d1 = X([1,2,3])
+        d2 = X([4,5,6])
+        d1 == d2   # not clear if this is supposed to be True or False,
+                   # but it used to give a SystemError
+
+
+class SubclassWithKwargs(deque):
+    def __init__(self, newarg=1):
+        deque.__init__(self)
+
+class TestSubclassWithKwargs(unittest.TestCase):
+    def test_subclass_with_kwargs(self):
+        # SF bug #1486663 -- this used to erroneously raise a TypeError
+        SubclassWithKwargs(newarg=1)
+
+#==============================================================================
+
+libreftest = """
+Example from the Library Reference:  Doc/lib/libcollections.tex
+
+>>> from collections import deque
+>>> d = deque('ghi')                 # make a new deque with three items
+>>> for elem in d:                   # iterate over the deque's elements
+...     print elem.upper()
+G
+H
+I
+>>> d.append('j')                    # add a new entry to the right side
+>>> d.appendleft('f')                # add a new entry to the left side
+>>> d                                # show the representation of the deque
+deque(['f', 'g', 'h', 'i', 'j'])
+>>> d.pop()                          # return and remove the rightmost item
+'j'
+>>> d.popleft()                      # return and remove the leftmost item
+'f'
+>>> list(d)                          # list the contents of the deque
+['g', 'h', 'i']
+>>> d[0]                             # peek at leftmost item
+'g'
+>>> d[-1]                            # peek at rightmost item
+'i'
+>>> list(reversed(d))                # list the contents of a deque in reverse
+['i', 'h', 'g']
+>>> 'h' in d                         # search the deque
+True
+>>> d.extend('jkl')                  # add multiple elements at once
+>>> d
+deque(['g', 'h', 'i', 'j', 'k', 'l'])
+>>> d.rotate(1)                      # right rotation
+>>> d
+deque(['l', 'g', 'h', 'i', 'j', 'k'])
+>>> d.rotate(-1)                     # left rotation
+>>> d
+deque(['g', 'h', 'i', 'j', 'k', 'l'])
+>>> deque(reversed(d))               # make a new deque in reverse order
+deque(['l', 'k', 'j', 'i', 'h', 'g'])
+>>> d.clear()                        # empty the deque
+>>> d.pop()                          # cannot pop from an empty deque
+Traceback (most recent call last):
+  File "<pyshell#6>", line 1, in -toplevel-
+    d.pop()
+IndexError: pop from an empty deque
+
+>>> d.extendleft('abc')              # extendleft() reverses the input order
+>>> d
+deque(['c', 'b', 'a'])
+
+
+
+>>> def delete_nth(d, n):
+...     d.rotate(-n)
+...     d.popleft()
+...     d.rotate(n)
+...
+>>> d = deque('abcdef')
+>>> delete_nth(d, 2)   # remove the entry at d[2]
+>>> d
+deque(['a', 'b', 'd', 'e', 'f'])
+
+
+
+>>> def roundrobin(*iterables):
+...     pending = deque(iter(i) for i in iterables)
+...     while pending:
+...         task = pending.popleft()
+...         try:
+...             yield task.next()
+...         except StopIteration:
+...             continue
+...         pending.append(task)
+...
+
+>>> for value in roundrobin('abc', 'd', 'efgh'):
+...     print value
+...
+a
+d
+e
+b
+f
+c
+g
+h
+
+
+>>> def maketree(iterable):
+...     d = deque(iterable)
+...     while len(d) > 1:
+...         pair = [d.popleft(), d.popleft()]
+...         d.append(pair)
+...     return list(d)
+...
+>>> print maketree('abcdefgh')
+[[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]]]
+
+"""
+
+
+#==============================================================================
+
+__test__ = {'libreftest' : libreftest}
+
+def test_main(verbose=None):
+    import sys
+    test_classes = (
+        TestBasic,
+        TestVariousIteratorArgs,
+        TestSubclass,
+        TestSubclassWithKwargs,
+    )
+
+    test_support.run_unittest(*test_classes)
+
+    # verify reference counting
+    if verbose and hasattr(sys, "gettotalrefcount"):
+        import gc
+        counts = [None] * 5
+        for i in xrange(len(counts)):
+            test_support.run_unittest(*test_classes)
+            gc.collect()
+            counts[i] = sys.gettotalrefcount()
+        print counts
+
+    # doctests
+    from test import test_deque
+    test_support.run_doctest(test_deque, verbose)
+
+if __name__ == "__main__":
+    test_main(verbose=True)

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_descr.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_descr.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,4218 @@
+# Test enhancements related to descriptors and new-style classes
+
+from test.test_support import verify, vereq, verbose, TestFailed, TESTFN, get_original_stdout
+from copy import deepcopy
+import warnings
+import gc
+
+warnings.filterwarnings("ignore",
+         r'complex divmod\(\), // and % are deprecated$',
+         DeprecationWarning, r'(<string>|%s)$' % __name__)
+
+def veris(a, b):
+    if a is not b:
+        raise TestFailed, "%r is %r" % (a, b)
+
+def testunop(a, res, expr="len(a)", meth="__len__"):
+    if verbose: print "checking", expr
+    dict = {'a': a}
+    vereq(eval(expr, dict), res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(t.__dict__[meth](a), res)
+    vereq(m(a), res)
+    bm = getattr(a, meth)
+    vereq(bm(), res)
+
+def testbinop(a, b, res, expr="a+b", meth="__add__"):
+    if verbose: print "checking", expr
+    dict = {'a': a, 'b': b}
+
+    # XXX Hack so this passes before 2.3 when -Qnew is specified.
+    if meth == "__div__" and 1/2 == 0.5:
+        meth = "__truediv__"
+
+    vereq(eval(expr, dict), res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(t.__dict__[meth](a, b), res)
+    vereq(m(a, b), res)
+    bm = getattr(a, meth)
+    vereq(bm(b), res)
+
+def testternop(a, b, c, res, expr="a[b:c]", meth="__getslice__"):
+    if verbose: print "checking", expr
+    dict = {'a': a, 'b': b, 'c': c}
+    vereq(eval(expr, dict), res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(m, t.__dict__[meth])
+    vereq(m(a, b, c), res)
+    bm = getattr(a, meth)
+    vereq(bm(b, c), res)
+
+def testsetop(a, b, res, stmt="a+=b", meth="__iadd__"):
+    if verbose: print "checking", stmt
+    dict = {'a': deepcopy(a), 'b': b}
+    exec stmt in dict
+    vereq(dict['a'], res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(m, t.__dict__[meth])
+    dict['a'] = deepcopy(a)
+    m(dict['a'], b)
+    vereq(dict['a'], res)
+    dict['a'] = deepcopy(a)
+    bm = getattr(dict['a'], meth)
+    bm(b)
+    vereq(dict['a'], res)
+
+def testset2op(a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
+    if verbose: print "checking", stmt
+    dict = {'a': deepcopy(a), 'b': b, 'c': c}
+    exec stmt in dict
+    vereq(dict['a'], res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(m, t.__dict__[meth])
+    dict['a'] = deepcopy(a)
+    m(dict['a'], b, c)
+    vereq(dict['a'], res)
+    dict['a'] = deepcopy(a)
+    bm = getattr(dict['a'], meth)
+    bm(b, c)
+    vereq(dict['a'], res)
+
+def testset3op(a, b, c, d, res, stmt="a[b:c]=d", meth="__setslice__"):
+    if verbose: print "checking", stmt
+    dict = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
+    exec stmt in dict
+    vereq(dict['a'], res)
+    t = type(a)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    m = getattr(t, meth)
+    vereq(m, t.__dict__[meth])
+    dict['a'] = deepcopy(a)
+    m(dict['a'], b, c, d)
+    vereq(dict['a'], res)
+    dict['a'] = deepcopy(a)
+    bm = getattr(dict['a'], meth)
+    bm(b, c, d)
+    vereq(dict['a'], res)
+
+def class_docstrings():
+    class Classic:
+        "A classic docstring."
+    vereq(Classic.__doc__, "A classic docstring.")
+    vereq(Classic.__dict__['__doc__'], "A classic docstring.")
+
+    class Classic2:
+        pass
+    verify(Classic2.__doc__ is None)
+
+    class NewStatic(object):
+        "Another docstring."
+    vereq(NewStatic.__doc__, "Another docstring.")
+    vereq(NewStatic.__dict__['__doc__'], "Another docstring.")
+
+    class NewStatic2(object):
+        pass
+    verify(NewStatic2.__doc__ is None)
+
+    class NewDynamic(object):
+        "Another docstring."
+    vereq(NewDynamic.__doc__, "Another docstring.")
+    vereq(NewDynamic.__dict__['__doc__'], "Another docstring.")
+
+    class NewDynamic2(object):
+        pass
+    verify(NewDynamic2.__doc__ is None)
+
+def lists():
+    if verbose: print "Testing list operations..."
+    testbinop([1], [2], [1,2], "a+b", "__add__")
+    testbinop([1,2,3], 2, 1, "b in a", "__contains__")
+    testbinop([1,2,3], 4, 0, "b in a", "__contains__")
+    testbinop([1,2,3], 1, 2, "a[b]", "__getitem__")
+    testternop([1,2,3], 0, 2, [1,2], "a[b:c]", "__getslice__")
+    testsetop([1], [2], [1,2], "a+=b", "__iadd__")
+    testsetop([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
+    testunop([1,2,3], 3, "len(a)", "__len__")
+    testbinop([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
+    testbinop([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
+    testset2op([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
+    testset3op([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d", "__setslice__")
+
+def dicts():
+    if verbose: print "Testing dict operations..."
+    testbinop({1:2}, {2:1}, -1, "cmp(a,b)", "__cmp__")
+    testbinop({1:2,3:4}, 1, 1, "b in a", "__contains__")
+    testbinop({1:2,3:4}, 2, 0, "b in a", "__contains__")
+    testbinop({1:2,3:4}, 1, 2, "a[b]", "__getitem__")
+    d = {1:2,3:4}
+    l1 = []
+    for i in d.keys(): l1.append(i)
+    l = []
+    for i in iter(d): l.append(i)
+    vereq(l, l1)
+    l = []
+    for i in d.__iter__(): l.append(i)
+    vereq(l, l1)
+    l = []
+    for i in dict.__iter__(d): l.append(i)
+    vereq(l, l1)
+    d = {1:2, 3:4}
+    testunop(d, 2, "len(a)", "__len__")
+    vereq(eval(repr(d), {}), d)
+    vereq(eval(d.__repr__(), {}), d)
+    testset2op({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c", "__setitem__")
+
+def dict_constructor():
+    if verbose:
+        print "Testing dict constructor ..."
+    d = dict()
+    vereq(d, {})
+    d = dict({})
+    vereq(d, {})
+    d = dict({1: 2, 'a': 'b'})
+    vereq(d, {1: 2, 'a': 'b'})
+    vereq(d, dict(d.items()))
+    vereq(d, dict(d.iteritems()))
+    d = dict({'one':1, 'two':2})
+    vereq(d, dict(one=1, two=2))
+    vereq(d, dict(**d))
+    vereq(d, dict({"one": 1}, two=2))
+    vereq(d, dict([("two", 2)], one=1))
+    vereq(d, dict([("one", 100), ("two", 200)], **d))
+    verify(d is not dict(**d))
+    for badarg in 0, 0L, 0j, "0", [0], (0,):
+        try:
+            dict(badarg)
+        except TypeError:
+            pass
+        except ValueError:
+            if badarg == "0":
+                # It's a sequence, and its elements are also sequences (gotta
+                # love strings <wink>), but they aren't of length 2, so this
+                # one seemed better as a ValueError than a TypeError.
+                pass
+            else:
+                raise TestFailed("no TypeError from dict(%r)" % badarg)
+        else:
+            raise TestFailed("no TypeError from dict(%r)" % badarg)
+
+    try:
+        dict({}, {})
+    except TypeError:
+        pass
+    else:
+        raise TestFailed("no TypeError from dict({}, {})")
+
+    class Mapping:
+        # Lacks a .keys() method; will be added later.
+        dict = {1:2, 3:4, 'a':1j}
+
+    try:
+        dict(Mapping())
+    except TypeError:
+        pass
+    else:
+        raise TestFailed("no TypeError from dict(incomplete mapping)")
+
+    Mapping.keys = lambda self: self.dict.keys()
+    Mapping.__getitem__ = lambda self, i: self.dict[i]
+    d = dict(Mapping())
+    vereq(d, Mapping.dict)
+
+    # Init from sequence of iterable objects, each producing a 2-sequence.
+    class AddressBookEntry:
+        def __init__(self, first, last):
+            self.first = first
+            self.last = last
+        def __iter__(self):
+            return iter([self.first, self.last])
+
+    d = dict([AddressBookEntry('Tim', 'Warsaw'),
+              AddressBookEntry('Barry', 'Peters'),
+              AddressBookEntry('Tim', 'Peters'),
+              AddressBookEntry('Barry', 'Warsaw')])
+    vereq(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
+
+    d = dict(zip(range(4), range(1, 5)))
+    vereq(d, dict([(i, i+1) for i in range(4)]))
+
+    # Bad sequence lengths.
+    for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
+        try:
+            dict(bad)
+        except ValueError:
+            pass
+        else:
+            raise TestFailed("no ValueError from dict(%r)" % bad)
+
+def test_dir():
+    """Exercise dir(): the no-argument form, builtin objects, classic
+    and new-style classes (including attributes added after instance
+    creation), module subclasses, and objects whose __class__ is a
+    proxy (a historical segfault case)."""
+    if verbose:
+        print "Testing dir() ..."
+    junk = 12
+    # dir() with no argument returns the local names bound so far.
+    vereq(dir(), ['junk'])
+    del junk
+
+    # Just make sure these don't blow up!
+    for arg in 2, 2L, 2j, 2e0, [2], "2", u"2", (2,), {2:2}, type, test_dir:
+        dir(arg)
+
+    # Try classic classes.
+    class C:
+        Cdata = 1
+        def Cmethod(self): pass
+
+    cstuff = ['Cdata', 'Cmethod', '__doc__', '__module__']
+    vereq(dir(C), cstuff)
+    verify('im_self' in dir(C.Cmethod))
+
+    c = C()  # c.__doc__ is an odd thing to see here; ditto c.__module__.
+    vereq(dir(c), cstuff)
+
+    # Instance attributes added after creation must show up too.
+    c.cdata = 2
+    c.cmethod = lambda self: 0
+    vereq(dir(c), cstuff + ['cdata', 'cmethod'])
+    verify('im_self' in dir(c.Cmethod))
+
+    class A(C):
+        Adata = 1
+        def Amethod(self): pass
+
+    astuff = ['Adata', 'Amethod'] + cstuff
+    vereq(dir(A), astuff)
+    verify('im_self' in dir(A.Amethod))
+    a = A()
+    vereq(dir(a), astuff)
+    verify('im_self' in dir(a.Amethod))
+    a.adata = 42
+    a.amethod = lambda self: 3
+    vereq(dir(a), astuff + ['adata', 'amethod'])
+
+    # The same, but with new-style classes.  Since these have object as a
+    # base class, a lot more gets sucked in.
+    def interesting(strings):
+        # Filter out the inherited dunder names from object.
+        return [s for s in strings if not s.startswith('_')]
+
+    class C(object):
+        Cdata = 1
+        def Cmethod(self): pass
+
+    cstuff = ['Cdata', 'Cmethod']
+    vereq(interesting(dir(C)), cstuff)
+
+    c = C()
+    vereq(interesting(dir(c)), cstuff)
+    verify('im_self' in dir(C.Cmethod))
+
+    c.cdata = 2
+    c.cmethod = lambda self: 0
+    vereq(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
+    verify('im_self' in dir(c.Cmethod))
+
+    class A(C):
+        Adata = 1
+        def Amethod(self): pass
+
+    astuff = ['Adata', 'Amethod'] + cstuff
+    vereq(interesting(dir(A)), astuff)
+    verify('im_self' in dir(A.Amethod))
+    a = A()
+    vereq(interesting(dir(a)), astuff)
+    a.adata = 42
+    a.amethod = lambda self: 3
+    vereq(interesting(dir(a)), astuff + ['adata', 'amethod'])
+    verify('im_self' in dir(a.Amethod))
+
+    # Try a module subclass.
+    import sys
+    class M(type(sys)):
+        pass
+    minstance = M("m")
+    minstance.b = 2
+    minstance.a = 1
+    names = [x for x in dir(minstance) if x not in ["__name__", "__doc__"]]
+    vereq(names, ['a', 'b'])
+
+    class M2(M):
+        def getdict(self):
+            return "Not a dict!"
+        __dict__ = property(getdict)
+
+    m2instance = M2("m2")
+    m2instance.b = 2
+    m2instance.a = 1
+    vereq(m2instance.__dict__, "Not a dict!")
+    # dir() on an object whose __dict__ is not a dict may raise
+    # TypeError; either outcome is tolerated here (no "else" clause).
+    try:
+        dir(m2instance)
+    except TypeError:
+        pass
+
+    # Two essentially featureless objects, just inheriting stuff from
+    # object.  NB. in PyPy, dir(None) additionally contains '__nonzero__'.
+    vereq(dir(object()), dir(Ellipsis))
+
+    # Nasty test case for proxied objects
+    class Wrapper(object):
+        def __init__(self, obj):
+            self.__obj = obj
+        def __repr__(self):
+            return "Wrapper(%s)" % repr(self.__obj)
+        def __getitem__(self, key):
+            return Wrapper(self.__obj[key])
+        def __len__(self):
+            return len(self.__obj)
+        def __getattr__(self, name):
+            return Wrapper(getattr(self.__obj, name))
+
+    class C(object):
+        def __getclass(self):
+            return Wrapper(type(self))
+        __class__ = property(__getclass)
+
+    dir(C()) # This used to segfault
+
+# Table of binary special-method names (without the __dunder__
+# wrapping) mapped to the operator or builtin used to invoke them.
+binops = {
+    'add': '+',
+    'sub': '-',
+    'mul': '*',
+    'div': '/',
+    'mod': '%',
+    'divmod': 'divmod',
+    'pow': '**',
+    'lshift': '<<',
+    'rshift': '>>',
+    'and': '&',
+    'xor': '^',
+    'or': '|',
+    'cmp': 'cmp',
+    'lt': '<',
+    'le': '<=',
+    'eq': '==',
+    'ne': '!=',
+    'gt': '>',
+    'ge': '>=',
+    }
+
+# Rewrite each value into an evaluatable expression: alphabetic names
+# become function calls ("divmod(a, b)"), symbols become infix
+# expressions ("a + b").
+for name, expr in binops.items():
+    if expr.islower():
+        expr = expr + "(a, b)"
+    else:
+        expr = 'a %s b' % expr
+    binops[name] = expr
+
+# Same idea as binops above, but for unary special methods.
+unops = {
+    'pos': '+',
+    'neg': '-',
+    'abs': 'abs',
+    'invert': '~',
+    'int': 'int',
+    'long': 'long',
+    'float': 'float',
+    'oct': 'oct',
+    'hex': 'hex',
+    }
+
+# Alphabetic names become calls ("abs(a)"), symbols become prefix
+# expressions ("- a").
+for name, expr in unops.items():
+    if expr.islower():
+        expr = expr + "(a)"
+    else:
+        expr = '%s a' % expr
+    unops[name] = expr
+
+def numops(a, b, skip=[]):
+    """Run every applicable binop/unop test on operands a and b.
+
+    For each entry in the binops/unops tables whose __name__ method
+    exists on a (and is not listed in skip), evaluate the reference
+    expression and check the operator protocol via testbinop/testunop.
+    Note: the local name 'dict' shadows the builtin; it is the eval
+    namespace, kept as-is for fidelity with the original test.
+    The mutable [] default is safe here because skip is never mutated.
+    """
+    dict = {'a': a, 'b': b}
+    for name, expr in binops.items():
+        if name not in skip:
+            name = "__%s__" % name
+            if hasattr(a, name):
+                res = eval(expr, dict)
+                testbinop(a, b, res, expr, name)
+    for name, expr in unops.items():
+        if name not in skip:
+            name = "__%s__" % name
+            if hasattr(a, name):
+                res = eval(expr, dict)
+                testunop(a, res, expr, name)
+
+def ints():
+    """Test int operator protocol, __nonzero__, NotImplemented
+    handling in an int subclass, and overflow on subclass creation."""
+    if verbose: print "Testing int operations..."
+    numops(100, 3)
+    # The following crashes in Python 2.2
+    vereq((1).__nonzero__(), 1)
+    vereq((0).__nonzero__(), 0)
+    # This returns 'NotImplemented' in Python 2.2
+    class C(int):
+        def __add__(self, other):
+            return NotImplemented
+    vereq(C(5L), 5)
+    # Returning NotImplemented with no reflected fallback must end in
+    # TypeError, not leak the NotImplemented singleton to the caller.
+    try:
+        C() + ""
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "NotImplemented should have caused TypeError"
+    import sys
+    # An int subclass cannot hold a value beyond sys.maxint.
+    try:
+        C(sys.maxint+1)
+    except OverflowError:
+        pass
+    else:
+        raise TestFailed, "should have raised OverflowError"
+
+def longs():
+    """Run the generic operator tests on long operands."""
+    if verbose: print "Testing long operations..."
+    numops(100L, 3L)
+
+def floats():
+    """Run the generic operator tests on float operands."""
+    if verbose: print "Testing float operations..."
+    numops(100.0, 3.0)
+
+def complexes():
+    """Test complex operators (skipping ordering and real-number
+    conversions, which complex does not support) and a complex
+    subclass that carries extra state via __slots__ + __new__."""
+    if verbose: print "Testing complex operations..."
+    numops(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge', 'int', 'long', 'float'])
+    class Number(complex):
+        __slots__ = ['prec']
+        def __new__(cls, *args, **kwds):
+            # complex is immutable, so the extra 'prec' attribute must
+            # be attached in __new__, not __init__.
+            result = complex.__new__(cls, *args)
+            result.prec = kwds.get('prec', 12)
+            return result
+        def __repr__(self):
+            prec = self.prec
+            if self.imag == 0.0:
+                return "%.*g" % (prec, self.real)
+            if self.real == 0.0:
+                return "%.*gj" % (prec, self.imag)
+            return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag)
+        __str__ = __repr__
+
+    a = Number(3.14, prec=6)
+    vereq(repr(a), "3.14")
+    vereq(a.prec, 6)
+
+    a = Number(a, prec=2)
+    vereq(repr(a), "3.1")
+    vereq(a.prec, 2)
+
+    # prec falls back to its default of 12 when not passed.
+    a = Number(234.5)
+    vereq(repr(a), "234.5")
+    vereq(a.prec, 12)
+
+
+def pydicts():
+    if verbose: print "Testing Python subclass of dict..."
+    verify(issubclass(dict, dict))
+    verify(isinstance({}, dict))
+    d = dict()
+    vereq(d, {})
+    verify(d.__class__ is dict)
+    verify(isinstance(d, dict))
+    class C(dict):
+        state = -1
+        def __init__(self, *a, **kw):
+            if a:
+                vereq(len(a), 1)
+                self.state = a[0]
+            if kw:
+                for k, v in kw.items(): self[v] = k
+        def __getitem__(self, key):
+            return self.get(key, 0)
+        def __setitem__(self, key, value):
+            verify(isinstance(key, type(0)))
+            dict.__setitem__(self, key, value)
+        def setstate(self, state):
+            self.state = state
+        def getstate(self):
+            return self.state
+    verify(issubclass(C, dict))
+    a1 = C(12)
+    vereq(a1.state, 12)
+    a2 = C(foo=1, bar=2)
+    vereq(a2[1] == 'foo' and a2[2], 'bar')
+    a = C()
+    vereq(a.state, -1)
+    vereq(a.getstate(), -1)
+    a.setstate(0)
+    vereq(a.state, 0)
+    vereq(a.getstate(), 0)
+    a.setstate(10)
+    vereq(a.state, 10)
+    vereq(a.getstate(), 10)
+    vereq(a[42], 0)
+    a[42] = 24
+    vereq(a[42], 24)
+    if verbose: print "pydict stress test ..."
+    N = 50
+    for i in range(N):
+        a[i] = C()
+        for j in range(N):
+            a[i][j] = i*j
+    for i in range(N):
+        for j in range(N):
+            vereq(a[i][j], i*j)
+
+def pylists():
+    """Test a list subclass overriding __getitem__ and the (Python 2
+    only) __getslice__ hook."""
+    class C(list):
+        def __getitem__(self, i):
+            return list.__getitem__(self, i) + 100
+        def __getslice__(self, i, j):
+            # Return the raw indices so the test can see which slice
+            # bounds were passed in.
+            return (i, j)
+    a = C()
+    a.extend([0,1,2])
+    vereq(a[0], 100)
+    vereq(a[1], 101)
+    vereq(a[2], 102)
+    vereq(a[100:200], (100,200))
+
+def metaclass():
+    """Test the __metaclass__ hook: type as metaclass, an inline
+    metaclass class, metaclasses overriding __new__, a non-type
+    metaclass (M2), cooperating metaclasses (autosuper/autoproperty
+    and their merge), and that type(x) does not re-run __init__."""
+    if verbose: print "Testing __metaclass__..."
+    class C:
+        __metaclass__ = type
+        def __init__(self):
+            self.__state = 0
+        def getstate(self):
+            return self.__state
+        def setstate(self, state):
+            self.__state = state
+    a = C()
+    vereq(a.getstate(), 0)
+    a.setstate(10)
+    vereq(a.getstate(), 10)
+    class D:
+        # A class literally named __metaclass__ in the body also works.
+        class __metaclass__(type):
+            def myself(cls): return cls
+    vereq(D.myself(), D)
+    d = D()
+    verify(d.__class__ is D)
+    class M1(type):
+        def __new__(cls, name, bases, dict):
+            dict['__spam__'] = 1
+            return type.__new__(cls, name, bases, dict)
+    class C:
+        __metaclass__ = M1
+    vereq(C.__spam__, 1)
+    c = C()
+    vereq(c.__spam__, 1)
+
+    class _instance(object):
+        pass
+    # M2 is a metaclass that is NOT a subclass of type: calling the
+    # "class" goes through M2.__call__ and builds a plain _instance.
+    class M2(object):
+        @staticmethod
+        def __new__(cls, name, bases, dict):
+            self = object.__new__(cls)
+            self.name = name
+            self.bases = bases
+            self.dict = dict
+            return self
+        def __call__(self):
+            it = _instance()
+            # Early binding of methods
+            for key in self.dict:
+                if key.startswith("__"):
+                    continue
+                setattr(it, key, self.dict[key].__get__(it, self))
+            return it
+    class C:
+        __metaclass__ = M2
+        def spam(self):
+            return 42
+    vereq(C.name, 'C')
+    vereq(C.bases, ())
+    verify('spam' in C.dict)
+    c = C()
+    vereq(c.spam(), 42)
+
+    # More metaclass examples
+
+    class autosuper(type):
+        # Automatically add __super to the class
+        # This trick only works for dynamic classes
+        def __new__(metaclass, name, bases, dict):
+            cls = super(autosuper, metaclass).__new__(metaclass,
+                                                      name, bases, dict)
+            # Name mangling for __super removes leading underscores
+            while name[:1] == "_":
+                name = name[1:]
+            if name:
+                name = "_%s__super" % name
+            else:
+                name = "__super"
+            setattr(cls, name, super(cls))
+            return cls
+    class A:
+        __metaclass__ = autosuper
+        def meth(self):
+            return "A"
+    class B(A):
+        def meth(self):
+            return "B" + self.__super.meth()
+    class C(A):
+        def meth(self):
+            return "C" + self.__super.meth()
+    class D(C, B):
+        def meth(self):
+            return "D" + self.__super.meth()
+    vereq(D().meth(), "DCBA")
+    class E(B, C):
+        def meth(self):
+            return "E" + self.__super.meth()
+    vereq(E().meth(), "EBCA")
+
+    class autoproperty(type):
+        # Automatically create property attributes when methods
+        # named _get_x and/or _set_x are found
+        def __new__(metaclass, name, bases, dict):
+            hits = {}
+            for key, val in dict.iteritems():
+                if key.startswith("_get_"):
+                    key = key[5:]
+                    get, set = hits.get(key, (None, None))
+                    get = val
+                    hits[key] = get, set
+                elif key.startswith("_set_"):
+                    key = key[5:]
+                    get, set = hits.get(key, (None, None))
+                    set = val
+                    hits[key] = get, set
+            for key, (get, set) in hits.iteritems():
+                dict[key] = property(get, set)
+            return super(autoproperty, metaclass).__new__(metaclass,
+                                                        name, bases, dict)
+    class A:
+        __metaclass__ = autoproperty
+        def _get_x(self):
+            return -self.__x
+        def _set_x(self, x):
+            self.__x = -x
+    a = A()
+    verify(not hasattr(a, "x"))
+    a.x = 12
+    vereq(a.x, 12)
+    # The stored (mangled) attribute holds the negated value.
+    vereq(a._A__x, -12)
+
+    class multimetaclass(autoproperty, autosuper):
+        # Merge of multiple cooperating metaclasses
+        pass
+    class A:
+        __metaclass__ = multimetaclass
+        def _get_x(self):
+            return "A"
+    class B(A):
+        def _get_x(self):
+            return "B" + self.__super._get_x()
+    class C(A):
+        def _get_x(self):
+            return "C" + self.__super._get_x()
+    class D(C, B):
+        def _get_x(self):
+            return "D" + self.__super._get_x()
+    vereq(D().x, "DCBA")
+
+    # Make sure type(x) doesn't call x.__class__.__init__
+    class T(type):
+        counter = 0
+        def __init__(self, *args):
+            T.counter += 1
+    class C:
+        __metaclass__ = T
+    vereq(T.counter, 1)
+    a = C()
+    vereq(type(a), C)
+    vereq(T.counter, 1)
+
+    class C(object): pass
+    c = C()
+    try: c()
+    except TypeError: pass
+    else: raise TestFailed, "calling object w/o call method should raise TypeError"
+
+def pymods():
+    """Test a Python subclass of the module type: attribute get, set
+    and delete must all route through the overridden hooks, recorded
+    in order in `log`."""
+    if verbose: print "Testing Python subclass of module..."
+    log = []
+    import sys
+    MT = type(sys)
+    class MM(MT):
+        def __init__(self, name):
+            MT.__init__(self, name)
+        def __getattribute__(self, name):
+            log.append(("getattr", name))
+            return MT.__getattribute__(self, name)
+        def __setattr__(self, name, value):
+            log.append(("setattr", name, value))
+            MT.__setattr__(self, name, value)
+        def __delattr__(self, name):
+            log.append(("delattr", name))
+            MT.__delattr__(self, name)
+    a = MM("a")
+    a.foo = 12
+    x = a.foo
+    del a.foo
+    # Exactly one log entry per operation, in execution order.
+    vereq(log, [("setattr", "foo", 12),
+                ("getattr", "foo"),
+                ("delattr", "foo")])
+
+def multi():
+    """Test multiple inheritance: dict+class mix, __int__ dispatch
+    through MI (SF #442833), MI mixing classic and new-style classes
+    and the resulting MROs, and that a new-style class needs at least
+    one new-style base."""
+    if verbose: print "Testing multiple inheritance..."
+    class C(object):
+        def __init__(self):
+            self.__state = 0
+        def getstate(self):
+            return self.__state
+        def setstate(self, state):
+            self.__state = state
+    a = C()
+    vereq(a.getstate(), 0)
+    a.setstate(10)
+    vereq(a.getstate(), 10)
+    class D(dict, C):
+        def __init__(self):
+            type({}).__init__(self)
+            C.__init__(self)
+    d = D()
+    vereq(d.keys(), [])
+    d["hello"] = "world"
+    vereq(d.items(), [("hello", "world")])
+    vereq(d["hello"], "world")
+    vereq(d.getstate(), 0)
+    d.setstate(10)
+    vereq(d.getstate(), 10)
+    vereq(D.__mro__, (D, dict, C, object))
+
+    # SF bug #442833
+    class Node(object):
+        def __int__(self):
+            # int() must use the dynamically-bound foo(), so a subclass
+            # overriding foo() changes the result of int().
+            return int(self.foo())
+        def foo(self):
+            return "23"
+    class Frag(Node, list):
+        def foo(self):
+            return "42"
+    vereq(Node().__int__(), 23)
+    vereq(int(Node()), 23)
+    vereq(Frag().__int__(), 42)
+    vereq(int(Frag()), 42)
+
+    # MI mixing classic and new-style classes.
+
+    class A:
+        x = 1
+
+    class B(A):
+        pass
+
+    class C(A):
+        x = 2
+
+    class D(B, C):
+        pass
+    # Classic depth-first lookup finds A.x via B before C.x.
+    vereq(D.x, 1)
+
+    # Classic MRO is preserved for a classic base class.
+    class E(D, object):
+        pass
+    vereq(E.__mro__, (E, D, B, A, C, object))
+    vereq(E.x, 1)
+
+    # But with a mix of classic bases, their MROs are combined using
+    # new-style MRO.
+    class F(B, C, object):
+        pass
+    vereq(F.__mro__, (F, B, C, A, object))
+    vereq(F.x, 2)
+
+    # Try something else.
+    class C:
+        def cmethod(self):
+            return "C a"
+        def all_method(self):
+            return "C b"
+
+    class M1(C, object):
+        def m1method(self):
+            return "M1 a"
+        def all_method(self):
+            return "M1 b"
+
+    vereq(M1.__mro__, (M1, C, object))
+    m = M1()
+    vereq(m.cmethod(), "C a")
+    vereq(m.m1method(), "M1 a")
+    vereq(m.all_method(), "M1 b")
+
+    class D(C):
+        def dmethod(self):
+            return "D a"
+        def all_method(self):
+            return "D b"
+
+    class M2(D, object):
+        def m2method(self):
+            return "M2 a"
+        def all_method(self):
+            return "M2 b"
+
+    vereq(M2.__mro__, (M2, D, C, object))
+    m = M2()
+    vereq(m.cmethod(), "C a")
+    vereq(m.dmethod(), "D a")
+    vereq(m.m2method(), "M2 a")
+    vereq(m.all_method(), "M2 b")
+
+    class M3(M1, M2, object):
+        def m3method(self):
+            return "M3 a"
+        def all_method(self):
+            return "M3 b"
+    vereq(M3.__mro__, (M3, M1, M2, D, C, object))
+    m = M3()
+    vereq(m.cmethod(), "C a")
+    vereq(m.dmethod(), "D a")
+    vereq(m.m1method(), "M1 a")
+    vereq(m.m2method(), "M2 a")
+    vereq(m.m3method(), "M3 a")
+    vereq(m.all_method(), "M3 b")
+
+    # Setting __metaclass__ = type with only classic bases must fail.
+    class Classic:
+        pass
+    try:
+        class New(Classic):
+            __metaclass__ = type
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "new class with only classic bases - shouldn't be"
+
+def diamond():
+    """Test diamond inheritance: method resolution through both arms
+    of the diamond, the resulting __mro__ tuples, and that an
+    inconsistent base order raises TypeError."""
+    if verbose: print "Testing multiple inheritance special cases..."
+    class A(object):
+        def spam(self): return "A"
+    vereq(A().spam(), "A")
+    class B(A):
+        def boo(self): return "B"
+        def spam(self): return "B"
+    vereq(B().spam(), "B")
+    vereq(B().boo(), "B")
+    class C(A):
+        def boo(self): return "C"
+    vereq(C().spam(), "A")
+    vereq(C().boo(), "C")
+    class D(B, C): pass
+    vereq(D().spam(), "B")
+    vereq(D().boo(), "B")
+    vereq(D.__mro__, (D, B, C, A, object))
+    class E(C, B): pass
+    vereq(E().spam(), "B")
+    vereq(E().boo(), "C")
+    vereq(E.__mro__, (E, C, B, A, object))
+    # MRO order disagreement
+    # D wants B before C, E wants C before B: no consistent
+    # linearization exists, so class creation must fail.
+    try:
+        class F(D, E): pass
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "expected MRO order disagreement (F)"
+    try:
+        class G(E, D): pass
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "expected MRO order disagreement (G)"
+
+
+# see thread python-dev/2002-October/029035.html
+def ex5():
+    """Check the C3 linearization for 'ex5' from the python-dev C3
+    switch discussion (X and Y share base A)."""
+    if verbose: print "Testing ex5 from C3 switch discussion..."
+    class A(object): pass
+    class B(object): pass
+    class C(object): pass
+    class X(A): pass
+    class Y(A): pass
+    class Z(X,B,Y,C): pass
+    vereq(Z.__mro__, (Z, X, B, Y, A, C, object))
+
+# see "A Monotonic Superclass Linearization for Dylan",
+# by Kim Barrett et al. (OOPSLA 1996)
+def monotonicity():
+    """Check MRO monotonicity with the boat-hierarchy example from the
+    Dylan linearization paper: each class's MRO must be an ordered
+    subsequence of its subclasses' MROs."""
+    if verbose: print "Testing MRO monotonicity..."
+    class Boat(object): pass
+    class DayBoat(Boat): pass
+    class WheelBoat(Boat): pass
+    class EngineLess(DayBoat): pass
+    class SmallMultihull(DayBoat): pass
+    class PedalWheelBoat(EngineLess,WheelBoat): pass
+    class SmallCatamaran(SmallMultihull): pass
+    class Pedalo(PedalWheelBoat,SmallCatamaran): pass
+
+    vereq(PedalWheelBoat.__mro__,
+          (PedalWheelBoat, EngineLess, DayBoat, WheelBoat, Boat,
+           object))
+    vereq(SmallCatamaran.__mro__,
+          (SmallCatamaran, SmallMultihull, DayBoat, Boat, object))
+
+    vereq(Pedalo.__mro__,
+          (Pedalo, PedalWheelBoat, EngineLess, SmallCatamaran,
+           SmallMultihull, DayBoat, WheelBoat, Boat, object))
+
+# see "A Monotonic Superclass Linearization for Dylan",
+# by Kim Barrett et al. (OOPSLA 1996)
+def consistency_with_epg():
+    if verbose: print "Testing consistentcy with EPG..."
+    class Pane(object): pass
+    class ScrollingMixin(object): pass
+    class EditingMixin(object): pass
+    class ScrollablePane(Pane,ScrollingMixin): pass
+    class EditablePane(Pane,EditingMixin): pass
+    class EditableScrollablePane(ScrollablePane,EditablePane): pass
+
+    vereq(EditableScrollablePane.__mro__,
+          (EditableScrollablePane, ScrollablePane, EditablePane,
+           Pane, ScrollingMixin, EditingMixin, object))
+
+mro_err_msg = "cycle among base classes:"
+
+def mro_disagreement():
+    """Check the error messages raised for impossible MROs: duplicate
+    bases, a base preceding its own subclass, and a genuinely
+    order-conflicting diamond (the grid-layout example)."""
+    if verbose: print "Testing error messages for MRO disagreement..."
+    def raises(exc, expected, callable, *args):
+        # Assert that callable(*args) raises exc with a message
+        # starting with `expected`.
+        try:
+            callable(*args)
+        except exc, msg:
+            if not str(msg).startswith(expected):
+                raise TestFailed, "Message %r, expected %r" % (str(msg),
+                                                               expected)
+        else:
+            raise TestFailed, "Expected %s" % exc
+    class A(object): pass
+    class B(A): pass
+    class C(object): pass
+    # Test some very simple errors
+    raises(TypeError, "duplicate base class A",
+           type, "X", (A, A), {})
+    raises(TypeError, mro_err_msg,
+           type, "X", (A, B), {})
+    raises(TypeError, mro_err_msg,
+           type, "X", (A, C, B), {})
+    # Test a slightly more complex error
+    class GridLayout(object): pass
+    class HorizontalGrid(GridLayout): pass
+    class VerticalGrid(GridLayout): pass
+    class HVGrid(HorizontalGrid, VerticalGrid): pass
+    class VHGrid(VerticalGrid, HorizontalGrid): pass
+    raises(TypeError, mro_err_msg,
+           type, "ConfusedGrid", (HVGrid, VHGrid), {})
+
+def objects():
+    """Test the bare object class: identity of instances, rejection of
+    attribute assignment, absence of __dict__, and that a trivial
+    object subclass does get a working __dict__."""
+    if verbose: print "Testing object class..."
+    a = object()
+    vereq(a.__class__, object)
+    vereq(type(a), object)
+    b = object()
+    verify(a is not b)
+    verify(not hasattr(a, "foo"))
+    # Plain object instances have no __dict__, so setting attributes
+    # must fail.
+    try:
+        a.foo = 12
+    except (AttributeError, TypeError):
+        pass
+    else:
+        verify(0, "object() should not allow setting a foo attribute")
+    verify(not hasattr(object(), "__dict__"))
+
+    class Cdict(object):
+        pass
+    x = Cdict()
+    vereq(x.__dict__, {})
+    x.foo = 1
+    vereq(x.foo, 1)
+    vereq(x.__dict__, {'foo': 1})
+
+def slots():
+    """Test __slots__: basic get/set/del, absence of __dict__, name
+    mangling, rejection of invalid slot names, unicode slot names,
+    reference/cycle leak regressions, and __del__ on slotted
+    instances."""
+    if verbose: print "Testing __slots__..."
+    class C0(object):
+        __slots__ = []
+    x = C0()
+    # Empty __slots__ means no __dict__ and no settable attributes.
+    verify(not hasattr(x, "__dict__"))
+    verify(not hasattr(x, "foo"))
+
+    class C1(object):
+        __slots__ = ['a']
+    x = C1()
+    verify(not hasattr(x, "__dict__"))
+    # A slot is not "set" until assigned to.
+    verify(not hasattr(x, "a"))
+    x.a = 1
+    vereq(x.a, 1)
+    x.a = None
+    veris(x.a, None)
+    del x.a
+    verify(not hasattr(x, "a"))
+
+    class C3(object):
+        __slots__ = ['a', 'b', 'c']
+    x = C3()
+    verify(not hasattr(x, "__dict__"))
+    verify(not hasattr(x, 'a'))
+    verify(not hasattr(x, 'b'))
+    verify(not hasattr(x, 'c'))
+    x.a = 1
+    x.b = 2
+    x.c = 3
+    vereq(x.a, 1)
+    vereq(x.b, 2)
+    vereq(x.c, 3)
+
+    class C4(object):
+        """Validate name mangling"""
+        __slots__ = ['__a']
+        def __init__(self, value):
+            self.__a = value
+        def get(self):
+            return self.__a
+    x = C4(5)
+    verify(not hasattr(x, '__dict__'))
+    verify(not hasattr(x, '__a'))
+    vereq(x.get(), 5)
+    # The slot is stored under the mangled name _C4__a, so setting the
+    # unmangled __a from outside the class must fail.
+    try:
+        x.__a = 6
+    except AttributeError:
+        pass
+    else:
+        raise TestFailed, "Double underscored names not mangled"
+
+    # Make sure slot names are proper identifiers
+    try:
+        class C(object):
+            __slots__ = [None]
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "[None] slots not caught"
+    try:
+        class C(object):
+            __slots__ = ["foo bar"]
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "['foo bar'] slots not caught"
+    try:
+        class C(object):
+            __slots__ = ["foo\0bar"]
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "['foo\\0bar'] slots not caught"
+    try:
+        class C(object):
+            __slots__ = ["1"]
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "['1'] slots not caught"
+    try:
+        class C(object):
+            __slots__ = [""]
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "[''] slots not caught"
+    class C(object):
+        __slots__ = ["a", "a_b", "_a", "A0123456789Z"]
+
+    # Test unicode slot names
+    try:
+        unichr
+    except NameError:
+        pass
+    else:
+        # _unicode_to_string used to modify slots in certain circumstances
+        slots = (unicode("foo"), unicode("bar"))
+        class C(object):
+            __slots__ = slots
+        x = C()
+        x.foo = 5
+        vereq(x.foo, 5)
+        # The tuple entries must still be unicode afterwards.
+        veris(type(slots[0]), unicode)
+        # this used to leak references
+        try:
+            class C(object):
+                __slots__ = [unichr(128)]
+        except (TypeError, UnicodeEncodeError):
+            pass
+        else:
+            raise TestFailed, "[unichr(128)] slots not caught"
+
+    # Test leaks
+    class Counted(object):
+        counter = 0    # counts the number of instances alive
+        def __init__(self):
+            Counted.counter += 1
+        def __del__(self):
+            Counted.counter -= 1
+    class C(object):
+        __slots__ = ['a', 'b', 'c']
+    x = C()
+    x.a = Counted()
+    x.b = Counted()
+    x.c = Counted()
+    vereq(Counted.counter, 3)
+    # Deleting the holder must release all three slot references.
+    del x
+    vereq(Counted.counter, 0)
+    class D(C):
+        pass
+    x = D()
+    # D has no __slots__ of its own, so it gets a __dict__ ('z' lands
+    # there while 'a' uses the inherited slot).
+    x.a = Counted()
+    x.z = Counted()
+    vereq(Counted.counter, 2)
+    del x
+    vereq(Counted.counter, 0)
+    class E(D):
+        __slots__ = ['e']
+    x = E()
+    x.a = Counted()
+    x.z = Counted()
+    x.e = Counted()
+    vereq(Counted.counter, 3)
+    del x
+    vereq(Counted.counter, 0)
+
+    # Test cyclical leaks [SF bug 519621]
+    class F(object):
+        __slots__ = ['a', 'b']
+    log = []
+    s = F()
+    # s.a -> [Counted, s] creates a reference cycle through the slot.
+    s.a = [Counted(), s]
+    vereq(Counted.counter, 1)
+    s = None
+    gc.collect()
+    gc.collect()
+    gc.collect()
+    vereq(Counted.counter, 0)
+
+    # Test lookup leaks [SF bug 572567]
+    import sys
+    class G(object):
+        def __cmp__(self, other):
+            return 0
+    g = G()
+    # Repeated comparisons must not leak objects.
+    orig_objects = len(gc.get_objects())
+    for i in xrange(10):
+        g==g
+    new_objects = len(gc.get_objects())
+    vereq(orig_objects, new_objects)
+    class H(object):
+        __slots__ = ['a', 'b']
+        def __init__(self):
+            self.a = 1
+            self.b = 2
+        def __del__(self):
+            # Slots must still be readable while __del__ runs.
+            assert self.a == 1
+            assert self.b == 2
+
+    # Redirect stderr so a failing assert in __del__ (which is only
+    # printed, not propagated) shows up in the captured stdout.
+    save_stderr = sys.stderr
+    sys.stderr = sys.stdout
+    h = H()
+    try:
+        del h
+    finally:
+        sys.stderr = save_stderr
+
+def slotspecials():
+    """Test the special slot names __dict__ and __weakref__: each can
+    be requested individually via __slots__, and combining the two via
+    multiple inheritance yields both."""
+    if verbose: print "Testing __dict__ and __weakref__ in __slots__..."
+
+    class D(object):
+        __slots__ = ["__dict__"]
+    a = D()
+    verify(hasattr(a, "__dict__"))
+    verify(not hasattr(a, "__weakref__"))
+    a.foo = 42
+    vereq(a.__dict__, {"foo": 42})
+
+    class W(object):
+        __slots__ = ["__weakref__"]
+    a = W()
+    verify(hasattr(a, "__weakref__"))
+    verify(not hasattr(a, "__dict__"))
+    # Weakref-only slots give no __dict__, so arbitrary attributes
+    # cannot be set.
+    try:
+        a.foo = 42
+    except AttributeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't be allowed to set a.foo"
+
+    class C1(W, D):
+        __slots__ = []
+    a = C1()
+    verify(hasattr(a, "__dict__"))
+    verify(hasattr(a, "__weakref__"))
+    a.foo = 42
+    vereq(a.__dict__, {"foo": 42})
+
+    class C2(D, W):
+        __slots__ = []
+    a = C2()
+    verify(hasattr(a, "__dict__"))
+    verify(hasattr(a, "__weakref__"))
+    a.foo = 42
+    vereq(a.__dict__, {"foo": 42})
+
+# MRO order disagreement
+#
+#    class C3(C1, C2):
+#        __slots__ = []
+#
+#    class C4(C2, C1):
+#        __slots__ = []
+
+def dynamics():
+    """Test dynamic class mutation: attributes/methods/special methods
+    added to a class after instances exist must take effect, including
+    __getattr__/__setattr__ hooks, and int/long subclasses must work
+    with sequence repetition."""
+    if verbose: print "Testing class attribute propagation..."
+    class D(object):
+        pass
+    class E(D):
+        pass
+    class F(D):
+        pass
+    D.foo = 1
+    vereq(D.foo, 1)
+    # Test that dynamic attributes are inherited
+    vereq(E.foo, 1)
+    vereq(F.foo, 1)
+    # Test dynamic instances
+    class C(object):
+        pass
+    a = C()
+    verify(not hasattr(a, "foobar"))
+    C.foobar = 2
+    vereq(a.foobar, 2)
+    C.method = lambda self: 42
+    vereq(a.method(), 42)
+    # Special methods added after the fact must be honored too.
+    C.__repr__ = lambda self: "C()"
+    vereq(repr(a), "C()")
+    C.__int__ = lambda self: 100
+    vereq(int(a), 100)
+    vereq(a.foobar, 2)
+    verify(not hasattr(a, "spam"))
+    def mygetattr(self, name):
+        if name == "spam":
+            return "spam"
+        raise AttributeError
+    C.__getattr__ = mygetattr
+    vereq(a.spam, "spam")
+    a.new = 12
+    vereq(a.new, 12)
+    def mysetattr(self, name, value):
+        if name == "spam":
+            raise AttributeError
+        return object.__setattr__(self, name, value)
+    C.__setattr__ = mysetattr
+    try:
+        a.spam = "not spam"
+    except AttributeError:
+        pass
+    else:
+        verify(0, "expected AttributeError")
+    vereq(a.spam, "spam")
+    class D(C):
+        pass
+    d = D()
+    d.foo = 1
+    vereq(d.foo, 1)
+
+    # Test handling of int*seq and seq*int
+    class I(int):
+        pass
+    vereq("a"*I(2), "aa")
+    vereq(I(2)*"a", "aa")
+    vereq(2*I(3), 6)
+    vereq(I(3)*2, 6)
+    vereq(I(3)*I(2), 6)
+
+    # Test handling of long*seq and seq*long
+    class L(long):
+        pass
+    vereq("a"*L(2L), "aa")
+    vereq(L(2L)*"a", "aa")
+    vereq(2*L(3), 6)
+    vereq(L(3)*2, 6)
+    vereq(L(3)*L(2), 6)
+
+    # Test comparison of classes with dynamic metaclasses
+    class dynamicmetaclass(type):
+        pass
+    class someclass:
+        __metaclass__ = dynamicmetaclass
+    verify(someclass != object)
+
+def errors():
+    """Test that illegal class statements raise TypeError: conflicting
+    layouts (list+dict), a non-type base, a builtin-function type as
+    base, and malformed __slots__ values."""
+    if verbose: print "Testing errors..."
+
+    # list and dict have incompatible instance layouts.
+    try:
+        class C(list, dict):
+            pass
+    except TypeError:
+        pass
+    else:
+        verify(0, "inheritance from both list and dict should be illegal")
+
+    try:
+        class C(object, None):
+            pass
+    except TypeError:
+        pass
+    else:
+        verify(0, "inheritance from non-type should be illegal")
+    # NOTE(review): Classic is defined but unused in this (modified)
+    # version of the test; upstream used it in a further sub-test.
+    class Classic:
+        pass
+
+    try:
+        class C(type(len)):
+            pass
+    except TypeError:
+        pass
+    else:
+        verify(0, "inheritance from CFunction should be illegal")
+
+    try:
+        class C(object):
+            __slots__ = 1
+    except TypeError:
+        pass
+    else:
+        verify(0, "__slots__ = 1 should be illegal")
+
+    try:
+        class C(object):
+            __slots__ = [1]
+    except TypeError:
+        pass
+    else:
+        verify(0, "__slots__ = [1] should be illegal")
+
+def classmethods():
+    """Test classmethod(): binding via class and instance, inheritance
+    (cls is the derived class), descriptor __get__ (SF #528132),
+    interaction with super() (SF #535444), callability checking
+    (SF #753451), and rejection of keyword arguments."""
+    if verbose: print "Testing class methods..."
+    class C(object):
+        def foo(*a): return a
+        goo = classmethod(foo)
+    c = C()
+    vereq(C.goo(1), (C, 1))
+    vereq(c.goo(1), (C, 1))
+    vereq(c.foo(1), (c, 1))
+    class D(C):
+        pass
+    d = D()
+    # Inherited classmethods receive the derived class as cls.
+    vereq(D.goo(1), (D, 1))
+    vereq(d.goo(1), (D, 1))
+    vereq(d.foo(1), (d, 1))
+    vereq(D.foo(d, 1), (d, 1))
+    # Test for a specific crash (SF bug 528132)
+    def f(cls, arg): return (cls, arg)
+    ff = classmethod(f)
+    vereq(ff.__get__(0, int)(42), (int, 42))
+    vereq(ff.__get__(0)(42), (int, 42))
+
+    # Test super() with classmethods (SF bug 535444)
+    veris(C.goo.im_self, C)
+    veris(D.goo.im_self, D)
+    veris(super(D,D).goo.im_self, D)
+    veris(super(D,d).goo.im_self, D)
+    vereq(super(D,D).goo(), (D,))
+    vereq(super(D,d).goo(), (D,))
+
+    # Verify that argument is checked for callability (SF bug 753451)
+    try:
+        classmethod(1).__get__(1)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "classmethod should check for callability"
+    # Verify that classmethod() doesn't allow keyword args
+    try:
+        classmethod(f, kw=1)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "classmethod shouldn't accept keyword args"
+
+
+def staticmethods():
+    if verbose: print "Testing static methods..."
+    class C(object):
+        def foo(*a): return a
+        goo = staticmethod(foo)
+    c = C()
+    vereq(C.goo(1), (1,))
+    vereq(c.goo(1), (1,))
+    vereq(c.foo(1), (c, 1,))
+    class D(C):
+        pass
+    d = D()
+    vereq(D.goo(1), (1,))
+    vereq(d.goo(1), (1,))
+    vereq(d.foo(1), (d, 1))
+    vereq(D.foo(d, 1), (d, 1))
+
+
+    class D(C):
+        pass
+    d = D()
+    vereq(D.goo(1), (1,))
+    vereq(d.goo(1), (1,))
+    vereq(d.foo(1), (d, 1))
+    vereq(D.foo(d, 1), (d, 1))
+
+def classic():
+    """Test classmethod() on classic classes, including inheritance,
+    sharing a classmethod with an unrelated classic class, and
+    explicit __get__ binding."""
+    if verbose: print "Testing classic classes..."
+    class C:
+        def foo(*a): return a
+        goo = classmethod(foo)
+    c = C()
+    vereq(C.goo(1), (C, 1))
+    vereq(c.goo(1), (C, 1))
+    vereq(c.foo(1), (c, 1))
+    class D(C):
+        pass
+    d = D()
+    vereq(D.goo(1), (D, 1))
+    vereq(d.goo(1), (D, 1))
+    vereq(d.foo(1), (d, 1))
+    vereq(D.foo(d, 1), (d, 1))
+    class E: # *not* subclassing from C
+        foo = C.foo
+    vereq(E().foo, C.foo) # i.e., unbound
+    verify(C.foo.__get__(C()).im_self is not None)
+
+def compattr():
+    """Test a hand-rolled data descriptor (a property work-alike) that
+    delegates __get__/__set__/__delete__ to captured callables."""
+    if verbose: print "Testing computed attributes..."
+    class C(object):
+        class computed_attribute(object):
+            # Minimal data descriptor: stores the get/set/delete
+            # callables supplied at class-definition time.
+            def __init__(self, get, set=None, delete=None):
+                self.__get = get
+                self.__set = set
+                self.__delete = delete
+            def __get__(self, obj, type=None):
+                return self.__get(obj)
+            def __set__(self, obj, value):
+                return self.__set(obj, value)
+            def __delete__(self, obj):
+                return self.__delete(obj)
+        def __init__(self):
+            self.__x = 0
+        def __get_x(self):
+            # Reading x post-increments the stored value, which is why
+            # two successive reads of a.x differ below.
+            x = self.__x
+            self.__x = x+1
+            return x
+        def __set_x(self, x):
+            self.__x = x
+        def __delete_x(self):
+            del self.__x
+        x = computed_attribute(__get_x, __set_x, __delete_x)
+    a = C()
+    vereq(a.x, 0)
+    vereq(a.x, 1)
+    a.x = 10
+    vereq(a.x, 10)
+    vereq(a.x, 11)
+    del a.x
+    vereq(hasattr(a, 'x'), 0)
+
+def newslot():
+    """Test overriding __new__ in a list subclass: the attribute set in
+    __new__ must be visible to __init__, and inheritance must work."""
+    if verbose: print "Testing __new__ slot override..."
+    class C(list):
+        def __new__(cls):
+            self = list.__new__(cls)
+            self.foo = 1
+            return self
+        def __init__(self):
+            # Sees the foo set by __new__ (1), so foo becomes 3.
+            self.foo = self.foo + 2
+    a = C()
+    vereq(a.foo, 3)
+    verify(a.__class__ is C)
+    class D(C):
+        pass
+    b = D()
+    vereq(b.foo, 3)
+    verify(b.__class__ is D)
+
+def altmro():
+    """Test type.mro()/__mro__ and overriding mro() in a metaclass,
+    including rejection of invalid mro() return values."""
+    if verbose: print "Testing mro() and overriding it..."
+    class A(object):
+        def f(self): return "A"
+    class B(A):
+        pass
+    class C(A):
+        def f(self): return "C"
+    class D(B, C):
+        pass
+    vereq(D.mro(), [D, B, C, A, object])
+    vereq(D.__mro__, (D, B, C, A, object))
+    vereq(D().f(), "C")
+
+    # A metaclass whose mro() reverses the normal order; lookup on X
+    # therefore finds A.f first instead of C.f.
+    class PerverseMetaType(type):
+        def mro(cls):
+            L = type.mro(cls)
+            L.reverse()
+            return L
+    class X(D,B,C,A):
+        __metaclass__ = PerverseMetaType
+    vereq(X.__mro__, (object, A, C, B, D, X))
+    vereq(X().f(), "A")
+
+    # mro() returning classes unrelated to the hierarchy must be rejected.
+    try:
+        class X(object):
+            class __metaclass__(type):
+                def mro(self):
+                    return [self, dict, object]
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "devious mro() return not caught"
+
+    # mro() returning non-class elements must be rejected.
+    try:
+        class X(object):
+            class __metaclass__(type):
+                def mro(self):
+                    return [1]
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "non-class mro() return not caught"
+
+    # mro() returning a non-sequence must be rejected.
+    try:
+        class X(object):
+            class __metaclass__(type):
+                def mro(self):
+                    return 1
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "non-sequence mro() return not caught"
+
+
+def overloading():
+    """Test attribute, item and slice operator overloading
+    (__getattr__/__setattr__/__delattr__, __getitem__/__setitem__/
+    __delitem__, and the __*slice__ family)."""
+    if verbose: print "Testing operator overloading..."
+
+    class B(object):
+        "Intermediate class because object doesn't have a __setattr__"
+
+    class C(B):
+
+        def __getattr__(self, name):
+            if name == "foo":
+                return ("getattr", name)
+            else:
+                raise AttributeError
+        def __setattr__(self, name, value):
+            if name == "foo":
+                self.setattr = (name, value)
+            else:
+                return B.__setattr__(self, name, value)
+        def __delattr__(self, name):
+            if name == "foo":
+                self.delattr = name
+            else:
+                return B.__delattr__(self, name)
+
+        def __getitem__(self, key):
+            return ("getitem", key)
+        def __setitem__(self, key, value):
+            self.setitem = (key, value)
+        def __delitem__(self, key):
+            self.delitem = key
+
+        def __getslice__(self, i, j):
+            return ("getslice", i, j)
+        def __setslice__(self, i, j, value):
+            self.setslice = (i, j, value)
+        def __delslice__(self, i, j):
+            self.delslice = (i, j)
+
+    a = C()
+    vereq(a.foo, ("getattr", "foo"))
+    a.foo = 12
+    vereq(a.setattr, ("foo", 12))
+    del a.foo
+    vereq(a.delattr, "foo")
+
+    vereq(a[12], ("getitem", 12))
+    a[12] = 21
+    vereq(a.setitem, (12, 21))
+    del a[12]
+    vereq(a.delitem, 12)
+
+    vereq(a[0:10], ("getslice", 0, 10))
+    a[0:10] = "foo"
+    vereq(a.setslice, (0, 10, "foo"))
+    del a[0:10]
+    vereq(a.delslice, (0, 10))
+
+def methods():
+    """Test method binding: unbound methods shared across classes and a
+    bound method stored as a class attribute."""
+    if verbose: print "Testing methods..."
+    class C(object):
+        def __init__(self, x):
+            self.x = x
+        def foo(self):
+            return self.x
+    c1 = C(1)
+    vereq(c1.foo(), 1)
+    class D(C):
+        boo = C.foo
+        # goo holds a method already bound to c1, so it always returns 1
+        # regardless of which instance it is called on.
+        goo = c1.foo
+    d2 = D(2)
+    vereq(d2.foo(), 2)
+    vereq(d2.boo(), 2)
+    vereq(d2.goo(), 1)
+    class E(object):
+        foo = C.foo
+    vereq(E().foo, C.foo) # i.e., unbound
+    verify(repr(C.foo.__get__(C(1))).startswith("<bound method "))
+
+def specials():
+    """Test default and overridden special operators (__nonzero__,
+    __hash__, comparisons, __str__/__repr__, __contains__), plus a
+    print-recursion safety check."""
+    # Test operators like __hash__ for which a built-in default exists
+    if verbose: print "Testing special operators..."
+    # Test the default behavior for static classes
+    class C(object):
+        def __getitem__(self, i):
+            if 0 <= i < 10: return i
+            raise IndexError
+    c1 = C()
+    c2 = C()
+    verify(not not c1)
+    verify(id(c1) != id(c2))
+    hash(c1)
+    hash(c2)
+    vereq(cmp(c1, c2), cmp(id(c1), id(c2)))
+    vereq(c1, c1)
+    verify(c1 != c2)
+    verify(not c1 != c1)
+    verify(not c1 == c2)
+    # Note that the module name appears in str/repr, and that varies
+    # depending on whether this test is run standalone or from a framework.
+    verify(str(c1).find('C object at ') >= 0)
+    vereq(str(c1), repr(c1))
+    verify(-1 not in c1)
+    for i in range(10):
+        verify(i in c1)
+    verify(10 not in c1)
+    # Test the default behavior for dynamic classes
+    class D(object):
+        def __getitem__(self, i):
+            if 0 <= i < 10: return i
+            raise IndexError
+    d1 = D()
+    d2 = D()
+    verify(not not d1)
+    verify(id(d1) != id(d2))
+    hash(d1)
+    hash(d2)
+    vereq(cmp(d1, d2), cmp(id(d1), id(d2)))
+    vereq(d1, d1)
+    verify(d1 != d2)
+    verify(not d1 != d1)
+    verify(not d1 == d2)
+    # Note that the module name appears in str/repr, and that varies
+    # depending on whether this test is run standalone or from a framework.
+    verify(str(d1).find('D object at ') >= 0)
+    vereq(str(d1), repr(d1))
+    verify(-1 not in d1)
+    for i in range(10):
+        verify(i in d1)
+    verify(10 not in d1)
+    # Test overridden behavior for static classes
+    class Proxy(object):
+        def __init__(self, x):
+            self.x = x
+        def __nonzero__(self):
+            return not not self.x
+        def __hash__(self):
+            return hash(self.x)
+        def __eq__(self, other):
+            return self.x == other
+        def __ne__(self, other):
+            return self.x != other
+        def __cmp__(self, other):
+            return cmp(self.x, other.x)
+        def __str__(self):
+            return "Proxy:%s" % self.x
+        def __repr__(self):
+            return "Proxy(%r)" % self.x
+        def __contains__(self, value):
+            return value in self.x
+    p0 = Proxy(0)
+    p1 = Proxy(1)
+    p_1 = Proxy(-1)
+    verify(not p0)
+    verify(not not p1)
+    vereq(hash(p0), hash(0))
+    vereq(p0, p0)
+    verify(p0 != p1)
+    verify(not p0 != p0)
+    vereq(not p0, p1)
+    vereq(cmp(p0, p1), -1)
+    vereq(cmp(p0, p0), 0)
+    vereq(cmp(p0, p_1), 1)
+    vereq(str(p0), "Proxy:0")
+    vereq(repr(p0), "Proxy(0)")
+    p10 = Proxy(range(10))
+    verify(-1 not in p10)
+    for i in range(10):
+        verify(i in p10)
+    verify(10 not in p10)
+    # Test overridden behavior for dynamic classes
+    class DProxy(object):
+        def __init__(self, x):
+            self.x = x
+        def __nonzero__(self):
+            return not not self.x
+        def __hash__(self):
+            return hash(self.x)
+        def __eq__(self, other):
+            return self.x == other
+        def __ne__(self, other):
+            return self.x != other
+        def __cmp__(self, other):
+            return cmp(self.x, other.x)
+        def __str__(self):
+            return "DProxy:%s" % self.x
+        def __repr__(self):
+            return "DProxy(%r)" % self.x
+        def __contains__(self, value):
+            return value in self.x
+    p0 = DProxy(0)
+    p1 = DProxy(1)
+    p_1 = DProxy(-1)
+    verify(not p0)
+    verify(not not p1)
+    vereq(hash(p0), hash(0))
+    vereq(p0, p0)
+    verify(p0 != p1)
+    verify(not p0 != p0)
+    vereq(not p0, p1)
+    vereq(cmp(p0, p1), -1)
+    vereq(cmp(p0, p0), 0)
+    vereq(cmp(p0, p_1), 1)
+    vereq(str(p0), "DProxy:0")
+    vereq(repr(p0), "DProxy(0)")
+    p10 = DProxy(range(10))
+    verify(-1 not in p10)
+    for i in range(10):
+        verify(i in p10)
+    verify(10 not in p10)
+    # Safety test for __cmp__
+    def unsafecmp(a, b):
+        try:
+            a.__class__.__cmp__(a, b)
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "shouldn't allow %s.__cmp__(%r, %r)" % (
+                a.__class__, a, b)
+    # unicode, int, float and long does not have a __cmp__ in PyPy
+    # unsafecmp(u"123", "123")
+    # unsafecmp("123", u"123")
+    # unsafecmp(1, 1.0)
+    # unsafecmp(1.0, 1)
+    # unsafecmp(1, 1L)
+    # unsafecmp(1L, 1)
+
+    # A str subclass whose __str__ returns self; printing it is expected
+    # to trigger a RuntimeError (recursion detected) -- see SF history.
+    class Letter(str):
+        def __new__(cls, letter):
+            if letter == 'EPS':
+                return str.__new__(cls)
+            return str.__new__(cls, letter)
+        def __str__(self):
+            if not self:
+                return 'EPS'
+            return self
+
+    # sys.stdout needs to be the original to trigger the recursion bug
+    # NOTE(review): sys.stdout is restored only on the non-failure path;
+    # if TestFailed is raised, stdout stays replaced.  A try/finally
+    # around the restore would be safer.
+    import sys
+    test_stdout = sys.stdout
+    sys.stdout = get_original_stdout()
+    try:
+        # nothing should actually be printed, this should raise an exception
+        print Letter('w')
+    except RuntimeError:
+        pass
+    else:
+        raise TestFailed, "expected a RuntimeError for print recursion"
+    sys.stdout = test_stdout
+
+def weakrefs():
+    """Test weak references to new-style instances, and the interaction
+    of __slots__ with weak referenceability."""
+    if verbose: print "Testing weak references..."
+    import weakref
+    class C(object):
+        pass
+    c = C()
+    r = weakref.ref(c)
+    verify(r() is c)
+    del c
+    # Multiple collect() calls -- presumably so a non-refcounting GC
+    # (PyPy) actually clears the referent before the check; TODO confirm.
+    gc.collect()
+    gc.collect()
+    gc.collect()
+    verify(r() is None)
+    del r
+    # A slotted class without '__weakref__' cannot be weakly referenced.
+    class NoWeak(object):
+        __slots__ = ['foo']
+    no = NoWeak()
+    try:
+        weakref.ref(no)
+    except TypeError, msg:
+        verify(str(msg).find("weak reference") >= 0)
+    else:
+        verify(0, "weakref.ref(no) should be illegal")
+    class Weak(object):
+        __slots__ = ['foo', '__weakref__']
+    yes = Weak()
+    r = weakref.ref(yes)
+    verify(r() is yes)
+    del yes
+    gc.collect()
+    gc.collect()
+    gc.collect()
+    verify(r() is None)
+    del r
+
+def properties():
+    """Test property(): get/set/delete through the descriptor, the raw
+    property object's read-only attributes, error propagation from a
+    broken property, and __doc__ inheritance from the getter."""
+    if verbose: print "Testing property..."
+    class C(object):
+        def getx(self):
+            return self.__x
+        def setx(self, value):
+            self.__x = value
+        def delx(self):
+            del self.__x
+        x = property(getx, setx, delx, doc="I'm the x property.")
+    a = C()
+    verify(not hasattr(a, "x"))
+    a.x = 42
+    vereq(a._C__x, 42)
+    vereq(a.x, 42)
+    del a.x
+    verify(not hasattr(a, "x"))
+    verify(not hasattr(a, "_C__x"))
+    C.x.__set__(a, 100)
+    vereq(C.x.__get__(a), 100)
+    C.x.__delete__(a)
+    verify(not hasattr(a, "x"))
+
+    raw = C.__dict__['x']
+    verify(isinstance(raw, property))
+
+    attrs = dir(raw)
+    verify("__doc__" in attrs)
+    verify("fget" in attrs)
+    verify("fset" in attrs)
+    verify("fdel" in attrs)
+
+    vereq(raw.__doc__, "I'm the x property.")
+    verify(raw.fget is C.__dict__['getx'])
+    verify(raw.fset is C.__dict__['setx'])
+    verify(raw.fdel is C.__dict__['delx'])
+
+    # property attributes are read-only; setting any must raise TypeError
+    # whose message mentions 'readonly'.
+    for attr in "__doc__", "fget", "fset", "fdel":
+        try:
+            setattr(raw, attr, 42)
+        except TypeError, msg:
+            if str(msg).find('readonly') < 0:
+                raise TestFailed("when setting readonly attr %r on a "
+                                 "property, got unexpected TypeError "
+                                 "msg %r" % (attr, str(msg)))
+        else:
+            raise TestFailed("expected TypeError from trying to set "
+                             "readonly %r attr on a property" % attr)
+
+    # A property whose getter raises must propagate the error when the
+    # attribute is looked up during iteration.
+    class D(object):
+        __getitem__ = property(lambda s: 1/0)
+
+    d = D()
+    try:
+        for i in d:
+            str(i)
+    except ZeroDivisionError:
+        pass
+    else:
+        raise TestFailed, "expected ZeroDivisionError from bad property"
+
+    # These vereq calls execute at class-creation time (inside the class
+    # body): __doc__ is taken from fget, and is None when only fset is given.
+    class E(object):
+        def getter(self):
+            "getter method"
+            return 0
+        def setter(self, value):
+            "setter method"
+            pass
+        prop = property(getter)
+        vereq(prop.__doc__, "getter method")
+        prop2 = property(fset=setter)
+        vereq(prop2.__doc__, None)
+
+    # this segfaulted in 2.5b2
+    try:
+        import _testcapi
+    except ImportError:
+        pass
+    else:
+        class X(object):
+            p = property(_testcapi.test_with_docstring)
+
+
+def supers():
+    """Test super(): cooperative method calls, unbound super stored as a
+    class attribute, subclassing super itself, argument validation, and
+    data-descriptor access through super."""
+    if verbose: print "Testing super..."
+
+    class A(object):
+        def meth(self, a):
+            return "A(%r)" % a
+
+    vereq(A().meth(1), "A(1)")
+
+    class B(A):
+        def __init__(self):
+            self.__super = super(B, self)
+        def meth(self, a):
+            return "B(%r)" % a + self.__super.meth(a)
+
+    vereq(B().meth(2), "B(2)A(2)")
+
+    # Unbound super() assigned under the name-mangled attribute, bound
+    # per-instance at lookup time.
+    class C(A):
+        def meth(self, a):
+            return "C(%r)" % a + self.__super.meth(a)
+    C._C__super = super(C)
+
+    vereq(C().meth(3), "C(3)A(3)")
+
+    class D(C, B):
+        def meth(self, a):
+            return "D(%r)" % a + super(D, self).meth(a)
+
+    vereq(D().meth(4), "D(4)C(4)B(4)A(4)")
+
+    # Test for subclassing super
+
+    class mysuper(super):
+        def __init__(self, *args):
+            return super(mysuper, self).__init__(*args)
+
+    class E(D):
+        def meth(self, a):
+            return "E(%r)" % a + mysuper(E, self).meth(a)
+
+    vereq(E().meth(5), "E(5)D(5)C(5)B(5)A(5)")
+
+    class F(E):
+        def meth(self, a):
+            s = self.__super # == mysuper(F, self)
+            return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a)
+    F._F__super = mysuper(F)
+
+    vereq(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)")
+
+    # Make sure certain errors are raised
+
+    try:
+        super(D, 42)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't allow super(D, 42)"
+
+    try:
+        super(D, C())
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't allow super(D, C())"
+
+    try:
+        super(D).__get__(12)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't allow super(D).__get__(12)"
+
+    try:
+        super(D).__get__(C())
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't allow super(D).__get__(C())"
+
+    # Make sure data descriptors can be overridden and accessed via super
+    # (new feature in Python 2.3)
+
+    class DDbase(object):
+        def getx(self): return 42
+        x = property(getx)
+
+    class DDsub(DDbase):
+        def getx(self): return "hello"
+        x = property(getx)
+
+    dd = DDsub()
+    vereq(dd.x, "hello")
+    vereq(super(DDsub, dd).x, 42)
+
+    # Ensure that super() lookup of descriptor from classmethod
+    # works (SF ID# 743627)
+
+    class Base(object):
+        aProp = property(lambda self: "foo")
+
+    class Sub(Base):
+        @classmethod
+        def test(klass):
+            return super(Sub,klass).aProp
+
+    veris(Sub.test(), Base.aProp)
+
+    # Verify that super() doesn't allow keyword args
+    try:
+        super(Base, kw=1)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "super shouldn't accept keyword args"
+
+def inherits():
+    """Test subclassing the built-in types (int, long, float, complex,
+    tuple, str, unicode, list, file): results of built-in operations on
+    subclass instances must come back as the base type, not the subclass."""
+    if verbose: print "Testing inheritance from basic types..."
+
+    class hexint(int):
+        def __repr__(self):
+            return hex(self)
+        def __add__(self, other):
+            return hexint(int.__add__(self, other))
+        # (Note that overriding __radd__ doesn't work,
+        # because the int type gets first dibs.)
+    vereq(repr(hexint(7) + 9), "0x10")
+    vereq(repr(hexint(1000) + 7), "0x3ef")
+    a = hexint(12345)
+    vereq(a, 12345)
+    vereq(int(a), 12345)
+    verify(int(a).__class__ is int)
+    vereq(hash(a), hash(12345))
+    verify((+a).__class__ is int)
+    verify((a >> 0).__class__ is int)
+    verify((a << 0).__class__ is int)
+    verify((hexint(0) << 12).__class__ is int)
+    verify((hexint(0) >> 12).__class__ is int)
+
+    class octlong(long):
+        __slots__ = []
+        def __str__(self):
+            s = oct(self)
+            if s[-1] == 'L':
+                s = s[:-1]
+            return s
+        def __add__(self, other):
+            return self.__class__(super(octlong, self).__add__(other))
+        __radd__ = __add__
+    vereq(str(octlong(3) + 5), "010")
+    # (Note that overriding __radd__ here only seems to work
+    # because the example uses a short int left argument.)
+    vereq(str(5 + octlong(3000)), "05675")
+    a = octlong(12345)
+    vereq(a, 12345L)
+    vereq(long(a), 12345L)
+    vereq(hash(a), hash(12345L))
+    verify(long(a).__class__ is long)
+    verify((+a).__class__ is long)
+    verify((-a).__class__ is long)
+    verify((-octlong(0)).__class__ is long)
+    verify((a >> 0).__class__ is long)
+    verify((a << 0).__class__ is long)
+    verify((a - 0).__class__ is long)
+    verify((a * 1).__class__ is long)
+    verify((a ** 1).__class__ is long)
+    verify((a // 1).__class__ is long)
+    verify((1 * a).__class__ is long)
+    verify((a | 0).__class__ is long)
+    verify((a ^ 0).__class__ is long)
+    verify((a & -1L).__class__ is long)
+    verify((octlong(0) << 12).__class__ is long)
+    verify((octlong(0) >> 12).__class__ is long)
+    verify(abs(octlong(0)).__class__ is long)
+
+    # Because octlong overrides __add__, we can't check the absence of +0
+    # optimizations using octlong.
+    class longclone(long):
+        pass
+    a = longclone(1)
+    verify((a + 0).__class__ is long)
+    verify((0 + a).__class__ is long)
+
+    # Check that negative clones don't segfault
+    a = longclone(-1)
+    vereq(a.__dict__, {})
+    vereq(long(a), -1)  # verify PyNumber_Long() copies the sign bit
+
+    class precfloat(float):
+        __slots__ = ['prec']
+        def __init__(self, value=0.0, prec=12):
+            self.prec = int(prec)
+            float.__init__(self, value)
+        def __repr__(self):
+            return "%.*g" % (self.prec, self)
+    vereq(repr(precfloat(1.1)), "1.1")
+    a = precfloat(12345)
+    vereq(a, 12345.0)
+    vereq(float(a), 12345.0)
+    verify(float(a).__class__ is float)
+    vereq(hash(a), hash(12345.0))
+    verify((+a).__class__ is float)
+
+    class madcomplex(complex):
+        def __repr__(self):
+            return "%.17gj%+.17g" % (self.imag, self.real)
+    a = madcomplex(-3, 4)
+    vereq(repr(a), "4j-3")
+    base = complex(-3, 4)
+    veris(base.__class__, complex)
+    vereq(a, base)
+    vereq(complex(a), base)
+    veris(complex(a).__class__, complex)
+    a = madcomplex(a)  # just trying another form of the constructor
+    vereq(repr(a), "4j-3")
+    vereq(a, base)
+    vereq(complex(a), base)
+    veris(complex(a).__class__, complex)
+    vereq(hash(a), hash(base))
+    veris((+a).__class__, complex)
+    veris((a + 0).__class__, complex)
+    vereq(a + 0, base)
+    veris((a - 0).__class__, complex)
+    vereq(a - 0, base)
+    veris((a * 1).__class__, complex)
+    vereq(a * 1, base)
+    veris((a / 1).__class__, complex)
+    vereq(a / 1, base)
+
+    class madtuple(tuple):
+        _rev = None
+        def rev(self):
+            # Cached reversed copy, memoized in _rev.
+            if self._rev is not None:
+                return self._rev
+            L = list(self)
+            L.reverse()
+            self._rev = self.__class__(L)
+            return self._rev
+    a = madtuple((1,2,3,4,5,6,7,8,9,0))
+    vereq(a, (1,2,3,4,5,6,7,8,9,0))
+    vereq(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
+    vereq(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
+    for i in range(10):
+        t = madtuple(range(i))
+        u = t.rev()
+        v = u.rev()
+        vereq(v, t)
+    a = madtuple((1,2,3,4,5))
+    vereq(tuple(a), (1,2,3,4,5))
+    verify(tuple(a).__class__ is tuple)
+    vereq(hash(a), hash((1,2,3,4,5)))
+    verify(a[:].__class__ is tuple)
+    verify((a * 1).__class__ is tuple)
+    verify((a * 0).__class__ is tuple)
+    verify((a + ()).__class__ is tuple)
+    a = madtuple(())
+    vereq(tuple(a), ())
+    verify(tuple(a).__class__ is tuple)
+    verify((a + a).__class__ is tuple)
+    verify((a * 0).__class__ is tuple)
+    verify((a * 1).__class__ is tuple)
+    verify((a * 2).__class__ is tuple)
+    verify(a[:].__class__ is tuple)
+
+    class madstring(str):
+        _rev = None
+        def rev(self):
+            if self._rev is not None:
+                return self._rev
+            L = list(self)
+            L.reverse()
+            self._rev = self.__class__("".join(L))
+            return self._rev
+    s = madstring("abcdefghijklmnopqrstuvwxyz")
+    vereq(s, "abcdefghijklmnopqrstuvwxyz")
+    vereq(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
+    vereq(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
+    for i in range(256):
+        s = madstring("".join(map(chr, range(i))))
+        t = s.rev()
+        u = t.rev()
+        vereq(u, s)
+    s = madstring("12345")
+    vereq(str(s), "12345")
+    verify(str(s).__class__ is str)
+
+    base = "\x00" * 5
+    s = madstring(base)
+    vereq(s, base)
+    vereq(str(s), base)
+    verify(str(s).__class__ is str)
+    vereq(hash(s), hash(base))
+    vereq({s: 1}[base], 1)
+    vereq({base: 1}[s], 1)
+    verify((s + "").__class__ is str)
+    vereq(s + "", base)
+    verify(("" + s).__class__ is str)
+    vereq("" + s, base)
+    verify((s * 0).__class__ is str)
+    vereq(s * 0, "")
+    verify((s * 1).__class__ is str)
+    vereq(s * 1, base)
+    verify((s * 2).__class__ is str)
+    vereq(s * 2, base + base)
+    verify(s[:].__class__ is str)
+    vereq(s[:], base)
+    verify(s[0:0].__class__ is str)
+    vereq(s[0:0], "")
+    verify(s.strip().__class__ is str)
+    vereq(s.strip(), base)
+    verify(s.lstrip().__class__ is str)
+    vereq(s.lstrip(), base)
+    verify(s.rstrip().__class__ is str)
+    vereq(s.rstrip(), base)
+    identitytab = ''.join([chr(i) for i in range(256)])
+    verify(s.translate(identitytab).__class__ is str)
+    vereq(s.translate(identitytab), base)
+    verify(s.translate(identitytab, "x").__class__ is str)
+    vereq(s.translate(identitytab, "x"), base)
+    vereq(s.translate(identitytab, "\x00"), "")
+    verify(s.replace("x", "x").__class__ is str)
+    vereq(s.replace("x", "x"), base)
+    verify(s.ljust(len(s)).__class__ is str)
+    vereq(s.ljust(len(s)), base)
+    verify(s.rjust(len(s)).__class__ is str)
+    vereq(s.rjust(len(s)), base)
+    verify(s.center(len(s)).__class__ is str)
+    vereq(s.center(len(s)), base)
+    verify(s.lower().__class__ is str)
+    vereq(s.lower(), base)
+
+    class madunicode(unicode):
+        _rev = None
+        def rev(self):
+            if self._rev is not None:
+                return self._rev
+            L = list(self)
+            L.reverse()
+            self._rev = self.__class__(u"".join(L))
+            return self._rev
+    u = madunicode("ABCDEF")
+    vereq(u, u"ABCDEF")
+    vereq(u.rev(), madunicode(u"FEDCBA"))
+    vereq(u.rev().rev(), madunicode(u"ABCDEF"))
+    base = u"12345"
+    u = madunicode(base)
+    vereq(unicode(u), base)
+    verify(unicode(u).__class__ is unicode)
+    vereq(hash(u), hash(base))
+    vereq({u: 1}[base], 1)
+    vereq({base: 1}[u], 1)
+    verify(u.strip().__class__ is unicode)
+    vereq(u.strip(), base)
+    verify(u.lstrip().__class__ is unicode)
+    vereq(u.lstrip(), base)
+    verify(u.rstrip().__class__ is unicode)
+    vereq(u.rstrip(), base)
+    verify(u.replace(u"x", u"x").__class__ is unicode)
+    vereq(u.replace(u"x", u"x"), base)
+    verify(u.replace(u"xy", u"xy").__class__ is unicode)
+    vereq(u.replace(u"xy", u"xy"), base)
+    verify(u.center(len(u)).__class__ is unicode)
+    vereq(u.center(len(u)), base)
+    verify(u.ljust(len(u)).__class__ is unicode)
+    vereq(u.ljust(len(u)), base)
+    verify(u.rjust(len(u)).__class__ is unicode)
+    vereq(u.rjust(len(u)), base)
+    verify(u.lower().__class__ is unicode)
+    vereq(u.lower(), base)
+    verify(u.upper().__class__ is unicode)
+    vereq(u.upper(), base)
+    verify(u.capitalize().__class__ is unicode)
+    vereq(u.capitalize(), base)
+    verify(u.title().__class__ is unicode)
+    vereq(u.title(), base)
+    verify((u + u"").__class__ is unicode)
+    vereq(u + u"", base)
+    verify((u"" + u).__class__ is unicode)
+    vereq(u"" + u, base)
+    verify((u * 0).__class__ is unicode)
+    vereq(u * 0, u"")
+    verify((u * 1).__class__ is unicode)
+    vereq(u * 1, base)
+    verify((u * 2).__class__ is unicode)
+    vereq(u * 2, base + base)
+    verify(u[:].__class__ is unicode)
+    vereq(u[:], base)
+    verify(u[0:0].__class__ is unicode)
+    vereq(u[0:0], u"")
+
+    class sublist(list):
+        pass
+    a = sublist(range(5))
+    vereq(a, range(5))
+    a.append("hello")
+    vereq(a, range(5) + ["hello"])
+    a[5] = 5
+    vereq(a, range(6))
+    a.extend(range(6, 20))
+    vereq(a, range(20))
+    a[-5:] = []
+    vereq(a, range(15))
+    del a[10:15]
+    vereq(len(a), 10)
+    vereq(a, range(10))
+    vereq(list(a), range(10))
+    vereq(a[0], 0)
+    vereq(a[9], 9)
+    vereq(a[-10], 0)
+    vereq(a[-1], 9)
+    vereq(a[:5], range(5))
+
+    class CountedInput(file):
+        """Counts lines read by self.readline().
+
+        self.lineno is the 0-based ordinal of the last line read, up to
+        a maximum of one greater than the number of lines in the file.
+
+        self.ateof is true if and only if the final "" line has been read,
+        at which point self.lineno stops incrementing, and further calls
+        to readline() continue to return "".
+        """
+
+        lineno = 0
+        ateof = 0
+        def readline(self):
+            if self.ateof:
+                return ""
+            s = file.readline(self)
+            # Next line works too.
+            # s = super(CountedInput, self).readline()
+            self.lineno += 1
+            if s == "":
+                self.ateof = 1
+            return s
+
+    f = file(name=TESTFN, mode='w')
+    lines = ['a\n', 'b\n', 'c\n']
+    try:
+        f.writelines(lines)
+        f.close()
+        f = CountedInput(TESTFN)
+        for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]):
+            got = f.readline()
+            vereq(expected, got)
+            vereq(f.lineno, i)
+            vereq(f.ateof, (i > len(lines)))
+        f.close()
+    finally:
+        try:
+            f.close()
+        except:
+            pass
+        try:
+            import os
+            os.unlink(TESTFN)
+        except:
+            pass
+
+def keywords():
+    """Test keyword arguments accepted by the built-in type constructors,
+    and that bogus keywords raise TypeError."""
+    if verbose:
+        print "Testing keyword args to basic type constructors ..."
+    vereq(int(x=1), 1)
+    vereq(float(x=2), 2.0)
+    vereq(long(x=3), 3L)
+    vereq(complex(imag=42, real=666), complex(666, 42))
+    vereq(str(object=500), '500')
+    vereq(unicode(string='abc', errors='strict'), u'abc')
+    vereq(tuple(sequence=range(3)), (0, 1, 2))
+    vereq(list(sequence=(0, 1, 2)), range(3))
+    # note: as of Python 2.3, dict() no longer has an "items" keyword arg
+
+    for constructor in (int, float, long, complex, str, unicode,
+                        tuple, list, file):
+        try:
+            constructor(bogus_keyword_arg=1)
+        except TypeError:
+            pass
+        else:
+            raise TestFailed("expected TypeError from bogus keyword "
+                             "argument to %r" % constructor)
+
+def restricted():
+    """Test interaction with rexec restricted execution.
+
+    Disabled: the early return below skips the whole body (rexec is not
+    considered safe); the remaining code is kept for reference only.
+    """
+    # XXX This test is disabled because rexec is not deemed safe
+    return
+    import rexec
+    if verbose:
+        print "Testing interaction with restricted execution ..."
+
+    sandbox = rexec.RExec()
+
+    code1 = """f = open(%r, 'w')""" % TESTFN
+    code2 = """f = file(%r, 'w')""" % TESTFN
+    code3 = """\
+f = open(%r)
+t = type(f)  # a sneaky way to get the file() constructor
+f.close()
+f = t(%r, 'w')  # rexec can't catch this by itself
+""" % (TESTFN, TESTFN)
+
+    f = open(TESTFN, 'w')  # Create the file so code3 can find it.
+    f.close()
+
+    try:
+        for code in code1, code2, code3:
+            try:
+                sandbox.r_exec(code)
+            except IOError, msg:
+                if str(msg).find("restricted") >= 0:
+                    outcome = "OK"
+                else:
+                    outcome = "got an exception, but not an expected one"
+            else:
+                outcome = "expected a restricted-execution exception"
+
+            if outcome != "OK":
+                raise TestFailed("%s, in %r" % (outcome, code))
+
+    finally:
+        try:
+            import os
+            os.unlink(TESTFN)
+        except:
+            pass
+
+def str_subclass_as_dict_key():
+    """Test that a str subclass overriding __eq__/__hash__ works
+    correctly as a dict key (case-insensitive lookups)."""
+    if verbose:
+        print "Testing a str subclass used as dict key .."
+
+    class cistr(str):
+        """Subclass of str that computes __eq__ case-insensitively.
+
+        Also computes a hash code of the string in canonical form.
+        """
+
+        def __init__(self, value):
+            self.canonical = value.lower()
+            self.hashcode = hash(self.canonical)
+
+        def __eq__(self, other):
+            if not isinstance(other, cistr):
+                other = cistr(other)
+            return self.canonical == other.canonical
+
+        def __hash__(self):
+            return self.hashcode
+
+    vereq(cistr('ABC'), 'abc')
+    vereq('aBc', cistr('ABC'))
+    vereq(str(cistr('ABC')), 'ABC')
+
+    d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
+    vereq(d[cistr('one')], 1)
+    vereq(d[cistr('tWo')], 2)
+    vereq(d[cistr('THrEE')], 3)
+    verify(cistr('ONe') in d)
+    vereq(d.get(cistr('thrEE')), 3)
+
+def classic_comparisons():
+    """Test three-way __cmp__ based comparisons against a classic class,
+    int, and object base, cross-checked against cmp() on plain ints."""
+    if verbose: print "Testing classic comparisons..."
+    class classic:
+        pass
+    for base in (classic, int, object):
+        if verbose: print "        (base = %s)" % base
+        class C(base):
+            def __init__(self, value):
+                self.value = int(value)
+            def __cmp__(self, other):
+                if isinstance(other, C):
+                    return cmp(self.value, other.value)
+                if isinstance(other, int) or isinstance(other, long):
+                    return cmp(self.value, other)
+                return NotImplemented
+        c1 = C(1)
+        c2 = C(2)
+        c3 = C(3)
+        vereq(c1, 1)
+        c = {1: c1, 2: c2, 3: c3}
+        for x in 1, 2, 3:
+            for y in 1, 2, 3:
+                verify(cmp(c[x], c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
+                for op in "<", "<=", "==", "!=", ">", ">=":
+                    verify(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
+                           "x=%d, y=%d" % (x, y))
+                verify(cmp(c[x], y) == cmp(x, y), "x=%d, y=%d" % (x, y))
+                verify(cmp(x, c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
+
+def rich_comparisons():
+    # Verify that rich comparison methods (__eq__ .. __ge__) are used in
+    # preference to __cmp__, for various base types and operand orders.
+    if verbose:
+        print "Testing rich comparisons..."
+    class Z(complex):
+        pass
+    z = Z(1)
+    vereq(z, 1+0j)
+    vereq(1+0j, z)
+    class ZZ(complex):
+        def __eq__(self, other):
+            # Approximate equality; any failure defers to the other operand.
+            try:
+                return abs(self - other) <= 1e-6
+            except:
+                return NotImplemented
+    zz = ZZ(1.0000003)
+    # Within tolerance, and symmetric in operand order.
+    vereq(zz, 1+0j)
+    vereq(1+0j, zz)
+
+    class classic:
+        pass
+    for base in (classic, int, object, list):
+        if verbose: print "        (base = %s)" % base
+        class C(base):
+            def __init__(self, value):
+                self.value = int(value)
+            def __cmp__(self, other):
+                # The rich methods below must always win; reaching __cmp__
+                # is itself a test failure.
+                raise TestFailed, "shouldn't call __cmp__"
+            def __eq__(self, other):
+                if isinstance(other, C):
+                    return self.value == other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value == other
+                return NotImplemented
+            def __ne__(self, other):
+                if isinstance(other, C):
+                    return self.value != other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value != other
+                return NotImplemented
+            def __lt__(self, other):
+                if isinstance(other, C):
+                    return self.value < other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value < other
+                return NotImplemented
+            def __le__(self, other):
+                if isinstance(other, C):
+                    return self.value <= other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value <= other
+                return NotImplemented
+            def __gt__(self, other):
+                if isinstance(other, C):
+                    return self.value > other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value > other
+                return NotImplemented
+            def __ge__(self, other):
+                if isinstance(other, C):
+                    return self.value >= other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value >= other
+                return NotImplemented
+        c1 = C(1)
+        c2 = C(2)
+        c3 = C(3)
+        vereq(c1, 1)
+        c = {1: c1, 2: c2, 3: c3}
+        for x in 1, 2, 3:
+            for y in 1, 2, 3:
+                # Exercise every operator in C-vs-C, C-vs-int and int-vs-C
+                # form; each must match the plain-int result.
+                for op in "<", "<=", "==", "!=", ">", ">=":
+                    verify(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
+                           "x=%d, y=%d" % (x, y))
+                    verify(eval("c[x] %s y" % op) == eval("x %s y" % op),
+                           "x=%d, y=%d" % (x, y))
+                    verify(eval("x %s c[y]" % op) == eval("x %s y" % op),
+                           "x=%d, y=%d" % (x, y))
+
+def coercions():
+    # Verify that coerce() accepts subclasses of the numeric built-ins in
+    # either operand position, against each compatible numeric type.
+    # Only "does not raise" is checked here, not the coerced values.
+    if verbose: print "Testing coercions..."
+    class I(int): pass
+    coerce(I(0), 0)
+    coerce(0, I(0))
+    class L(long): pass
+    coerce(L(0), 0)
+    coerce(L(0), 0L)
+    coerce(0, L(0))
+    coerce(0L, L(0))
+    class F(float): pass
+    coerce(F(0), 0)
+    coerce(F(0), 0L)
+    coerce(F(0), 0.)
+    coerce(0, F(0))
+    coerce(0L, F(0))
+    coerce(0., F(0))
+    class C(complex): pass
+    coerce(C(0), 0)
+    coerce(C(0), 0L)
+    coerce(C(0), 0.)
+    coerce(C(0), 0j)
+    coerce(0, C(0))
+    coerce(0L, C(0))
+    coerce(0., C(0))
+    coerce(0j, C(0))
+
+def descrdoc():
+    # Verify that built-in descriptors expose the expected __doc__ strings.
+    if verbose: print "Testing descriptor doc strings..."
+    def check(descr, what):
+        vereq(descr.__doc__, what)
+    check(file.closed, "True if the file is closed") # getset descriptor
+    check(file.name, "file name") # member descriptor
+
+def setclass():
+    # Verify __class__ assignment: allowed between compatible heap types,
+    # rejected (TypeError) for incompatible layouts, built-ins, and
+    # non-class values; deleting __class__ is always rejected.
+    if verbose: print "Testing __class__ assignment..."
+    class C(object): pass
+    class D(object): pass
+    class E(object): pass
+    class F(D, E): pass
+    for cls in C, D, E, F:
+        for cls2 in C, D, E, F:
+            # All four classes share object's layout, so reassignment in
+            # either direction must succeed and round-trip.
+            x = cls()
+            x.__class__ = cls2
+            verify(x.__class__ is cls2)
+            x.__class__ = cls
+            verify(x.__class__ is cls)
+    def cant(x, C):
+        # Helper: both assigning C to x.__class__ and deleting x.__class__
+        # must raise; anything else is a test failure.
+        try:
+            x.__class__ = C
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "shouldn't allow %r.__class__ = %r" % (x, C)
+        try:
+            delattr(x, "__class__")
+        except (TypeError, AttributeError):
+            pass
+        else:
+            raise TestFailed, "shouldn't allow del %r.__class__" % x
+    cant(C(), list)
+    cant(list(), C)
+    cant(C(), 1)
+    cant(C(), object)
+    cant(object(), list)
+    cant(list(), object)
+    class Int(int): __slots__ = []
+    cant(2, Int)
+    cant(Int(), int)
+    cant(True, int)
+    cant(2, bool)
+    o = object()
+    cant(o, type(1))
+    cant(o, type(None))
+    del o
+
+def setdict():
+    # Verify __dict__ assignment on instances: a real dict is accepted,
+    # non-dict values are rejected, deletion is allowed, and classes
+    # themselves reject __dict__ assignment.
+    if verbose: print "Testing __dict__ assignment..."
+    class C(object): pass
+    a = C()
+    a.__dict__ = {'b': 1}
+    vereq(a.b, 1)
+    def cant(x, dict):
+        # Helper: assignment of `dict` to x.__dict__ must raise.
+        try:
+            x.__dict__ = dict
+        except (AttributeError, TypeError):
+            pass
+        else:
+            raise TestFailed, "shouldn't allow %r.__dict__ = %r" % (x, dict)
+    cant(a, None)
+    cant(a, [])
+    cant(a, 1)
+    del a.__dict__ # Deleting __dict__ is allowed
+    # Classes don't allow __dict__ assignment
+    cant(C, {})
+
+def pickles():
+    # Verify pickling (pickle and cPickle, text and binary protocols) and
+    # copy.deepcopy of new-style classes and their instances, including
+    # __getnewargs__, __getstate__/__setstate__, and classic/new hybrids.
+    if verbose:
+        print "Testing pickling and copying new-style classes and objects..."
+    import pickle, cPickle
+
+    def sorteditems(d):
+        # Deterministic item list for dict comparison.
+        L = d.items()
+        L.sort()
+        return L
+
+    # The test classes must be module-global so pickle can find them by name.
+    global C
+    class C(object):
+        def __init__(self, a, b):
+            super(C, self).__init__()
+            self.a = a
+            self.b = b
+        def __repr__(self):
+            return "C(%r, %r)" % (self.a, self.b)
+
+    global C1
+    class C1(list):
+        # list subclass: reconstructed via __getnewargs__.
+        def __new__(cls, a, b):
+            return super(C1, cls).__new__(cls)
+        def __getnewargs__(self):
+            return (self.a, self.b)
+        def __init__(self, a, b):
+            self.a = a
+            self.b = b
+        def __repr__(self):
+            return "C1(%r, %r)<%r>" % (self.a, self.b, list(self))
+
+    global C2
+    class C2(int):
+        # int subclass: __getnewargs__ must also restore the int value.
+        def __new__(cls, a, b, val=0):
+            return super(C2, cls).__new__(cls, val)
+        def __getnewargs__(self):
+            return (self.a, self.b, int(self))
+        def __init__(self, a, b, val=0):
+            self.a = a
+            self.b = b
+        def __repr__(self):
+            return "C2(%r, %r)<%r>" % (self.a, self.b, int(self))
+
+    global C3
+    class C3(object):
+        # Custom state protocol: state is just self.foo.
+        def __init__(self, foo):
+            self.foo = foo
+        def __getstate__(self):
+            return self.foo
+        def __setstate__(self, foo):
+            self.foo = foo
+
+    global C4classic, C4
+    class C4classic: # classic
+        pass
+    class C4(C4classic, object): # mixed inheritance
+        pass
+
+    for p in pickle, cPickle:
+        for bin in 0, 1:
+            if verbose:
+                print p.__name__, ["text", "binary"][bin]
+
+            # Classes pickle by reference: loading must give back the
+            # identical class object.
+            for cls in C, C1, C2:
+                s = p.dumps(cls, bin)
+                cls2 = p.loads(s)
+                verify(cls2 is cls)
+
+            # Instances round-trip with class, attribute dict and repr intact.
+            a = C1(1, 2); a.append(42); a.append(24)
+            b = C2("hello", "world", 42)
+            s = p.dumps((a, b), bin)
+            x, y = p.loads(s)
+            vereq(x.__class__, a.__class__)
+            vereq(sorteditems(x.__dict__), sorteditems(a.__dict__))
+            vereq(y.__class__, b.__class__)
+            vereq(sorteditems(y.__dict__), sorteditems(b.__dict__))
+            vereq(repr(x), repr(a))
+            vereq(repr(y), repr(b))
+            if verbose:
+                print "a = x =", a
+                print "b = y =", b
+            # Test for __getstate__ and __setstate__ on new style class
+            u = C3(42)
+            s = p.dumps(u, bin)
+            v = p.loads(s)
+            veris(u.__class__, v.__class__)
+            vereq(u.foo, v.foo)
+            # Test for picklability of hybrid class
+            u = C4()
+            u.foo = 42
+            s = p.dumps(u, bin)
+            v = p.loads(s)
+            veris(u.__class__, v.__class__)
+            vereq(u.foo, v.foo)
+
+    # Testing copy.deepcopy()
+    if verbose:
+        print "deepcopy"
+    import copy
+    for cls in C, C1, C2:
+        # deepcopy of a class is the class itself, not a copy.
+        cls2 = copy.deepcopy(cls)
+        verify(cls2 is cls)
+
+    a = C1(1, 2); a.append(42); a.append(24)
+    b = C2("hello", "world", 42)
+    x, y = copy.deepcopy((a, b))
+    vereq(x.__class__, a.__class__)
+    vereq(sorteditems(x.__dict__), sorteditems(a.__dict__))
+    vereq(y.__class__, b.__class__)
+    vereq(sorteditems(y.__dict__), sorteditems(b.__dict__))
+    vereq(repr(x), repr(a))
+    vereq(repr(y), repr(b))
+    if verbose:
+        print "a = x =", a
+        print "b = y =", b
+
+def pickleslots():
+    if verbose: print "Testing pickling of classes with __slots__ ..."
+    import pickle, cPickle
+    # Pickling of classes with __slots__ but without __getstate__ should fail
+    global B, C, D, E
+    class B(object):
+        pass
+    for base in [object, B]:
+        class C(base):
+            __slots__ = ['a']
+        class D(C):
+            pass
+        try:
+            pickle.dumps(C())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: pickle C instance - %s" % base
+        try:
+            cPickle.dumps(C())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: cPickle C instance - %s" % base
+        try:
+            pickle.dumps(C())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: pickle D instance - %s" % base
+        try:
+            cPickle.dumps(D())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: cPickle D instance - %s" % base
+        # Give C a nice generic __getstate__ and __setstate__
+        class C(base):
+            __slots__ = ['a']
+            def __getstate__(self):
+                try:
+                    d = self.__dict__.copy()
+                except AttributeError:
+                    d = {}
+                for cls in self.__class__.__mro__:
+                    for sn in cls.__dict__.get('__slots__', ()):
+                        try:
+                            d[sn] = getattr(self, sn)
+                        except AttributeError:
+                            pass
+                return d
+            def __setstate__(self, d):
+                for k, v in d.items():
+                    setattr(self, k, v)
+        class D(C):
+            pass
+        # Now it should work
+        x = C()
+        y = pickle.loads(pickle.dumps(x))
+        vereq(hasattr(y, 'a'), 0)
+        y = cPickle.loads(cPickle.dumps(x))
+        vereq(hasattr(y, 'a'), 0)
+        x.a = 42
+        y = pickle.loads(pickle.dumps(x))
+        vereq(y.a, 42)
+        y = cPickle.loads(cPickle.dumps(x))
+        vereq(y.a, 42)
+        x = D()
+        x.a = 42
+        x.b = 100
+        y = pickle.loads(pickle.dumps(x))
+        vereq(y.a + y.b, 142)
+        y = cPickle.loads(cPickle.dumps(x))
+        vereq(y.a + y.b, 142)
+        # A subclass that adds a slot should also work
+        class E(C):
+            __slots__ = ['b']
+        x = E()
+        x.a = 42
+        x.b = "foo"
+        y = pickle.loads(pickle.dumps(x))
+        vereq(y.a, x.a)
+        vereq(y.b, x.b)
+        y = cPickle.loads(cPickle.dumps(x))
+        vereq(y.a, x.a)
+        vereq(y.b, x.b)
+
+def copies():
+    # Verify copy.copy (shallow: attribute objects are shared) and
+    # copy.deepcopy (deep: mutating the original leaves the copy alone)
+    # on a plain new-style instance.
+    if verbose: print "Testing copy.copy() and copy.deepcopy()..."
+    import copy
+    class C(object):
+        pass
+
+    a = C()
+    a.foo = 12
+    b = copy.copy(a)
+    vereq(b.__dict__, a.__dict__)
+
+    a.bar = [1,2,3]
+    c = copy.copy(a)
+    vereq(c.bar, a.bar)
+    # Shallow copy shares the list object itself.
+    verify(c.bar is a.bar)
+
+    d = copy.deepcopy(a)
+    vereq(d.__dict__, a.__dict__)
+    # Deep copy must not see later mutations of the original.
+    a.bar.append(4)
+    vereq(d.bar, [1,2,3])
+
+def binopoverride():
+    # Verify that an int subclass can override binary operators (+, **) in
+    # both normal and reflected form, including three-argument pow().
+    if verbose: print "Testing overrides of binary operations..."
+    class I(int):
+        def __repr__(self):
+            return "I(%r)" % int(self)
+        def __add__(self, other):
+            return I(int(self) + int(other))
+        __radd__ = __add__
+        def __pow__(self, other, mod=None):
+            if mod is None:
+                return I(pow(int(self), int(other)))
+            else:
+                return I(pow(int(self), int(other), int(mod)))
+        def __rpow__(self, other, mod=None):
+            if mod is None:
+                return I(pow(int(other), int(self), mod))
+            else:
+                return I(pow(int(other), int(self), int(mod)))
+
+    # Each result must come back as an I (checked via its repr), regardless
+    # of which operand is the subclass instance.
+    vereq(repr(I(1) + I(2)), "I(3)")
+    vereq(repr(I(1) + 2), "I(3)")
+    vereq(repr(1 + I(2)), "I(3)")
+    vereq(repr(I(2) ** I(3)), "I(8)")
+    vereq(repr(2 ** I(3)), "I(8)")
+    vereq(repr(I(2) ** 3), "I(8)")
+    vereq(repr(pow(I(2), I(3), I(5))), "I(3)")
+    # Defining this used to crash in some versions; only its definition
+    # is exercised here, not its behavior.
+    class S(str):
+        def __eq__(self, other):
+            return self.lower() == other.lower()
+
+def subclasspropagation():
+    # Verify that adding/removing slot methods (__hash__, __getattribute__,
+    # __getattr__) on base classes propagates immediately to already-created
+    # instances of subclasses, following the diamond MRO D -> B -> C -> A.
+    if verbose: print "Testing propagation of slot functions to subclasses..."
+    class A(object):
+        pass
+    class B(A):
+        pass
+    class C(A):
+        pass
+    class D(B, C):
+        pass
+    d = D()
+    orig_hash = hash(d) # related to id(d) in platform-dependent ways
+    # Installing __hash__ further down the MRO must shadow the earlier one...
+    A.__hash__ = lambda self: 42
+    vereq(hash(d), 42)
+    C.__hash__ = lambda self: 314
+    vereq(hash(d), 314)
+    B.__hash__ = lambda self: 144
+    vereq(hash(d), 144)
+    D.__hash__ = lambda self: 100
+    vereq(hash(d), 100)
+    # ...and deleting them must fall back in reverse order.
+    del D.__hash__
+    vereq(hash(d), 144)
+    del B.__hash__
+    vereq(hash(d), 314)
+    del C.__hash__
+    vereq(hash(d), 42)
+    del A.__hash__
+    vereq(hash(d), orig_hash)
+    d.foo = 42
+    d.bar = 42
+    vereq(d.foo, 42)
+    vereq(d.bar, 42)
+    def __getattribute__(self, name):
+        # Intercepts every lookup; 'foo' is overridden, others delegate.
+        if name == "foo":
+            return 24
+        return object.__getattribute__(self, name)
+    A.__getattribute__ = __getattribute__
+    vereq(d.foo, 24)
+    vereq(d.bar, 42)
+    def __getattr__(self, name):
+        # Only consulted when normal lookup fails.
+        if name in ("spam", "foo", "bar"):
+            return "hello"
+        raise AttributeError, name
+    B.__getattr__ = __getattr__
+    vereq(d.spam, "hello")
+    vereq(d.foo, 24)
+    vereq(d.bar, 42)
+    del A.__getattribute__
+    vereq(d.foo, 42)
+    del d.foo
+    # With the instance attribute gone, __getattr__ takes over.
+    vereq(d.foo, "hello")
+    vereq(d.bar, 42)
+    del B.__getattr__
+    try:
+        d.foo
+    except AttributeError:
+        pass
+    else:
+        raise TestFailed, "d.foo should be undefined now"
+
+    # Test a nasty bug in recurse_down_subclasses()
+    import gc
+    class A(object):
+        pass
+    class B(A):
+        pass
+    del B
+    gc.collect()
+    A.__setitem__ = lambda *a: None # crash
+
+def buffer_inherit():
+    # SF bug [#470040]: verify that str/unicode subclasses inherit the
+    # buffer interface (exercised via binascii.b2a_hex), while int
+    # subclasses do not gain one.
+    import binascii
+    # SF bug [#470040] ParseTuple t# vs subclasses.
+    if verbose:
+        print "Testing that buffer interface is inherited ..."
+
+    class MyStr(str):
+        pass
+    base = 'abc'
+    m = MyStr(base)
+    # b2a_hex uses the buffer interface to get its argument's value, via
+    # PyArg_ParseTuple 't#' code.
+    vereq(binascii.b2a_hex(m), binascii.b2a_hex(base))
+
+    # It's not clear that unicode will continue to support the character
+    # buffer interface, and this test will fail if that's taken away.
+    class MyUni(unicode):
+        pass
+    base = u'abc'
+    m = MyUni(base)
+    vereq(binascii.b2a_hex(m), binascii.b2a_hex(base))
+
+    class MyInt(int):
+        pass
+    m = MyInt(42)
+    try:
+        binascii.b2a_hex(m)
+        raise TestFailed('subclass of int should not have a buffer interface')
+    except TypeError:
+        pass
+
+def str_of_str_subclass():
+    # Verify that __str__/__repr__ overridden in a str subclass are honored
+    # by str(), repr(), and the print statement (with and without explicit
+    # str() conversion, which exercise different internal paths).
+    import binascii
+    import cStringIO
+
+    if verbose:
+        print "Testing __str__ defined in subclass of str ..."
+
+    class octetstring(str):
+        def __str__(self):
+            # Hex-encode the raw bytes.
+            return binascii.b2a_hex(self)
+        def __repr__(self):
+            return self + " repr"
+
+    o = octetstring('A')
+    vereq(type(o), octetstring)
+    # Conversions produce plain str, not the subclass.
+    vereq(type(str(o)), str)
+    vereq(type(repr(o)), str)
+    vereq(ord(o), 0x41)
+    vereq(str(o), '41')
+    vereq(repr(o), 'A repr')
+    vereq(o.__str__(), '41')
+    vereq(o.__repr__(), 'A repr')
+
+    capture = cStringIO.StringIO()
+    # Calling str() or not exercises different internal paths.
+    print >> capture, o
+    print >> capture, str(o)
+    vereq(capture.getvalue(), '41\n41\n')
+    capture.close()
+
+def kwdargs():
+    # Verify that __call__ and unbound __init__ accept keyword arguments.
+    if verbose: print "Testing keyword arguments to __init__, __call__..."
+    def f(a): return a
+    vereq(f.__call__(a=42), 42)
+    a = []
+    # 'sequence' is the keyword name of list.__init__'s argument here.
+    list.__init__(a, sequence=[0, 1, 2])
+    vereq(a, [0, 1, 2])
+
+def recursive__call__():
+    # Setting __call__ to an instance of the class makes calling recurse
+    # forever; verify the interpreter stops it with RuntimeError rather
+    # than crashing.
+    if verbose: print ("Testing recursive __call__() by setting to instance of "
+                        "class ...")
+    class A(object):
+        pass
+
+    A.__call__ = A()
+    try:
+        A()()
+    except RuntimeError:
+        pass
+    else:
+        raise TestFailed("Recursion limit should have been reached for "
+                         "__call__()")
+
+def delhook():
+    # Verify that __del__ runs when the last reference goes away, and that
+    # del on an unsupported operation (item deletion) raises TypeError.
+    # NOTE(review): the log check right after `del c` assumes prompt
+    # finalization (refcounting semantics) — may need adjustment on PyPy.
+    if verbose: print "Testing __del__ hook..."
+    log = []
+    class C(object):
+        def __del__(self):
+            log.append(1)
+    c = C()
+    vereq(log, [])
+    del c
+    vereq(log, [1])
+
+    class D(object): pass
+    d = D()
+    try: del d[0]
+    except TypeError: pass
+    else: raise TestFailed, "invalid del() didn't raise TypeError"
+
+def hashinherit():
+    # Verify that subclasses of mutable built-ins (dict, list) remain
+    # unhashable: hash() must raise TypeError.
+    if verbose: print "Testing hash of mutable subclasses..."
+
+    class mydict(dict):
+        pass
+    d = mydict()
+    try:
+        hash(d)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "hash() of dict subclass should fail"
+
+    class mylist(list):
+        pass
+    d = mylist()
+    try:
+        hash(d)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "hash() of list subclass should fail"
+
+def strops():
+    # Verify error behavior of assorted str operations (concatenation,
+    # split, join, rindex, %-formatting) and a few simple results.
+    try: 'a' + 5
+    except TypeError: pass
+    else: raise TestFailed, "'' + 5 doesn't raise TypeError"
+
+    try: ''.split('')
+    except ValueError: pass
+    else: raise TestFailed, "''.split('') doesn't raise ValueError"
+
+    try: ''.join([0])
+    except TypeError: pass
+    else: raise TestFailed, "''.join([0]) doesn't raise TypeError"
+
+    try: ''.rindex('5')
+    except ValueError: pass
+    else: raise TestFailed, "''.rindex('5') doesn't raise ValueError"
+
+    # %-formatting error cases: wrong mapping type, truncated format,
+    # bad '*' width/precision arguments, argument-count mismatch,
+    # and a trailing lone '%'.
+    try: '%(n)s' % None
+    except TypeError: pass
+    else: raise TestFailed, "'%(n)s' % None doesn't raise TypeError"
+
+    try: '%(n' % {}
+    except ValueError: pass
+    else: raise TestFailed, "'%(n' % {} '' doesn't raise ValueError"
+
+    try: '%*s' % ('abc')
+    except TypeError: pass
+    else: raise TestFailed, "'%*s' % ('abc') doesn't raise TypeError"
+
+    try: '%*.*s' % ('abc', 5)
+    except TypeError: pass
+    else: raise TestFailed, "'%*.*s' % ('abc', 5) doesn't raise TypeError"
+
+    try: '%s' % (1, 2)
+    except TypeError: pass
+    else: raise TestFailed, "'%s' % (1, 2) doesn't raise TypeError"
+
+    try: '%' % None
+    except ValueError: pass
+    else: raise TestFailed, "'%' % None doesn't raise ValueError"
+
+    vereq('534253'.isdigit(), 1)
+    vereq('534253x'.isdigit(), 0)
+    vereq('%c' % 5, '\x05')
+    vereq('%c' % '5', '5')
+
+def deepcopyrecursive():
+    # Regression test: deepcopy of mutually-referencing objects must not
+    # recurse infinitely.
+    if verbose: print "Testing deepcopy of recursive objects..."
+    class Node:
+        pass
+    a = Node()
+    b = Node()
+    a.b = b
+    b.a = a
+    z = deepcopy(a) # This blew up before
+
+def modules():
+    # Verify that a module created via M.__new__ (bypassing __init__) has
+    # an empty namespace — no __name__/__file__ — yet str() works and
+    # attributes can still be set.
+    if verbose: print "Testing uninitialized module objects..."
+    from types import ModuleType as M
+    m = M.__new__(M)
+    str(m)
+    vereq(hasattr(m, "__name__"), 0)
+    vereq(hasattr(m, "__file__"), 0)
+    vereq(hasattr(m, "foo"), 0)
+    vereq(bool(m.__dict__), False)
+    m.foo = 1
+    vereq(m.__dict__, {"foo": 1})
+
+def dictproxyiterkeys():
+    class C(object):
+        def meth(self):
+            pass
+    if verbose: print "Testing dict-proxy iterkeys..."
+    keys = [ key for key in C.__dict__.iterkeys() ]
+    keys.sort()
+    vereq(keys, ['__dict__', '__doc__', '__module__', '__weakref__', 'meth'])
+
+def dictproxyitervalues():
+    # Verify itervalues() on a class's dict-proxy yields one value per key.
+    class C(object):
+        def meth(self):
+            pass
+    if verbose: print "Testing dict-proxy itervalues..."
+    # (The loop variable deliberately shadows the list name; harmless here.)
+    values = [ values for values in C.__dict__.itervalues() ]
+    vereq(len(values), 5)
+
+def dictproxyiteritems():
+    # Verify iteritems() on a class's dict-proxy yields the expected keys.
+    class C(object):
+        def meth(self):
+            pass
+    if verbose: print "Testing dict-proxy iteritems..."
+    keys = [ key for (key, value) in C.__dict__.iteritems() ]
+    keys.sort()
+    vereq(keys, ['__dict__', '__doc__', '__module__', '__weakref__', 'meth'])
+
+def funnynew():
+    # Verify __new__ returning something other than an instance of cls:
+    # a non-instance return skips __init__ entirely; returning an instance
+    # of a different (sub)class runs that path's __init__ normally.
+    if verbose: print "Testing __new__ returning something unexpected..."
+    class C(object):
+        def __new__(cls, arg):
+            if isinstance(arg, str): return [1, 2, 3]
+            elif isinstance(arg, int): return object.__new__(D)
+            else: return object.__new__(cls)
+    class D(C):
+        def __init__(self, arg):
+            self.foo = arg
+    # str arg: a plain list comes back and D.__init__ never runs.
+    vereq(C("1"), [1, 2, 3])
+    vereq(D("1"), [1, 2, 3])
+    d = D(None)
+    veris(d.foo, None)
+    # int arg: C() manufactures a D instance, so D.__init__ does run.
+    d = C(1)
+    vereq(isinstance(d, D), True)
+    vereq(d.foo, 1)
+    d = D(1)
+    vereq(isinstance(d, D), True)
+    vereq(d.foo, 1)
+
+def imulbug():
+    # SF bug 544647: verify __imul__ receives the right-hand operand
+    # unchanged for every operand type (float, int, long, huge long,
+    # None, str) — no bogus conversion to a sequence-repeat count.
+    if verbose: print "Testing for __imul__ problems..."
+    class C(object):
+        def __imul__(self, other):
+            return (self, other)
+    x = C()
+    y = x
+    y *= 1.0
+    vereq(y, (x, 1.0))
+    y = x
+    y *= 2
+    vereq(y, (x, 2))
+    y = x
+    y *= 3L
+    vereq(y, (x, 3L))
+    y = x
+    y *= 1L<<100
+    vereq(y, (x, 1L<<100))
+    y = x
+    y *= None
+    vereq(y, (x, None))
+    y = x
+    y *= "foo"
+    vereq(y, (x, "foo"))
+
+def docdescriptor():
+    # SF bug 542984: verify a descriptor assigned to __doc__ is invoked on
+    # both class and instance access, for classic and new-style classes.
+    if verbose: print "Testing __doc__ descriptor..."
+    class DocDescr(object):
+        def __get__(self, object, otype):
+            # Render whether access came via instance, class, or both.
+            if object:
+                object = object.__class__.__name__ + ' instance'
+            if otype:
+                otype = otype.__name__
+            return 'object=%s; type=%s' % (object, otype)
+    class OldClass:
+        __doc__ = DocDescr()
+    class NewClass(object):
+        __doc__ = DocDescr()
+    vereq(OldClass.__doc__, 'object=None; type=OldClass')
+    vereq(OldClass().__doc__, 'object=OldClass instance; type=OldClass')
+    vereq(NewClass.__doc__, 'object=None; type=NewClass')
+    vereq(NewClass().__doc__, 'object=NewClass instance; type=NewClass')
+
+def copy_setstate():
+    # Verify copy.copy and copy.deepcopy both go through
+    # __getstate__/__setstate__: the copied state is [self.foo], so the
+    # private __foo is rebuilt from it rather than copied directly.
+    if verbose:
+        print "Testing that copy.*copy() correctly uses __setstate__..."
+    import copy
+    class C(object):
+        def __init__(self, foo=None):
+            self.foo = foo
+            self.__foo = foo
+        def setfoo(self, foo=None):
+            # Updates the public attribute only, leaving __foo stale.
+            self.foo = foo
+        def getfoo(self):
+            return self.__foo
+        def __getstate__(self):
+            return [self.foo]
+        def __setstate__(self, lst):
+            assert len(lst) == 1
+            self.__foo = self.foo = lst[0]
+    a = C(42)
+    a.setfoo(24)
+    vereq(a.foo, 24)
+    vereq(a.getfoo(), 42)
+    # After copying, both attributes come from the state list, so they agree.
+    b = copy.copy(a)
+    vereq(b.foo, 24)
+    vereq(b.getfoo(), 24)
+    b = copy.deepcopy(a)
+    vereq(b.foo, 24)
+    vereq(b.getfoo(), 24)
+
+def slices():
+    # Verify that [:n] syntax, explicit slice objects, and direct
+    # __getitem__/__setitem__ calls all agree, for str/tuple/list and
+    # subclasses that override __getitem__.
+    if verbose:
+        print "Testing cases with slices and overridden __getitem__ ..."
+    # Strings
+    vereq("hello"[:4], "hell")
+    vereq("hello"[slice(4)], "hell")
+    vereq(str.__getitem__("hello", slice(4)), "hell")
+    class S(str):
+        def __getitem__(self, x):
+            return str.__getitem__(self, x)
+    vereq(S("hello")[:4], "hell")
+    vereq(S("hello")[slice(4)], "hell")
+    vereq(S("hello").__getitem__(slice(4)), "hell")
+    # Tuples
+    vereq((1,2,3)[:2], (1,2))
+    vereq((1,2,3)[slice(2)], (1,2))
+    vereq(tuple.__getitem__((1,2,3), slice(2)), (1,2))
+    class T(tuple):
+        def __getitem__(self, x):
+            return tuple.__getitem__(self, x)
+    vereq(T((1,2,3))[:2], (1,2))
+    vereq(T((1,2,3))[slice(2)], (1,2))
+    vereq(T((1,2,3)).__getitem__(slice(2)), (1,2))
+    # Lists
+    vereq([1,2,3][:2], [1,2])
+    vereq([1,2,3][slice(2)], [1,2])
+    vereq(list.__getitem__([1,2,3], slice(2)), [1,2])
+    class L(list):
+        def __getitem__(self, x):
+            return list.__getitem__(self, x)
+    vereq(L([1,2,3])[:2], [1,2])
+    vereq(L([1,2,3])[slice(2)], [1,2])
+    vereq(L([1,2,3]).__getitem__(slice(2)), [1,2])
+    # Now do lists and __setitem__
+    a = L([1,2,3])
+    a[slice(1, 3)] = [3,2]
+    vereq(a, [1,3,2])
+    a[slice(0, 2, 1)] = [3,1]
+    vereq(a, [3,1,2])
+    a.__setitem__(slice(1, 3), [2,1])
+    vereq(a, [3,2,1])
+    a.__setitem__(slice(0, 2, 1), [2,3])
+    vereq(a, [2,3,1])
+
+def subtype_resurrection():
+    # Verify that a __del__ which resurrects the instance (by appending it
+    # to a class-level container) does not crash the collector, and that
+    # the resurrected instance keeps its attributes.
+    if verbose:
+        print "Testing resurrection of new-style instance..."
+
+    class C(object):
+        container = []
+
+        def __del__(self):
+            # resurrect the instance
+            C.container.append(self)
+
+    c = C()
+    c.attr = 42
+    # The most interesting thing here is whether this blows up, due to flawed
+    #  GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1 bug).
+    del c
+    gc.collect()
+    gc.collect()
+    gc.collect()
+    # If that didn't blow up, it's also interesting to see whether clearing
+    # the last container slot works:  that will attempt to delete c again,
+    # which will cause c to get appended back to the container again "during"
+    # the del.
+    del C.container[-1]
+    gc.collect()
+    gc.collect()
+    gc.collect()
+    vereq(len(C.container), 1)
+    vereq(C.container[-1].attr, 42)
+
+    # Make c mortal again, so that the test framework with -l doesn't report
+    # it as a leak.
+    del C.__del__
+
+def slottrash():
+    # Deallocating deeply nested slotted trash caused stack overflows
+    # (each trash holds the previous one in its slot, forming a 50000-deep
+    # chain that is torn down all at once by the final `del`).
+    if verbose:
+        print "Testing slot trash..."
+    class trash(object):
+        __slots__ = ['x']
+        def __init__(self, x):
+            self.x = x
+    o = None
+    for i in xrange(50000):
+        o = trash(o)
+    del o
+
+def slotmultipleinheritance():
+    # SF bug 575229, multiple inheritance w/ slots dumps core.
+    # A slotted class combined with a plain base must still end up with
+    # __dict__ and __weakref__, and instances must accept new attributes.
+    class A(object):
+        __slots__=()
+    class B(object):
+        pass
+    class C(A,B) :
+        __slots__=()
+    # No __basicsize__ in PyPy
+    # vereq(C.__basicsize__, B.__basicsize__)
+    verify(hasattr(C, '__dict__'))
+    verify(hasattr(C, '__weakref__'))
+    C().x = 2
+
+def testrmul():
+    # SF patch 592646: verify * picks __mul__ when the instance is on the
+    # left and __rmul__ when it is on the right, for int and float operands.
+    if verbose:
+        print "Testing correct invocation of __rmul__..."
+    class C(object):
+        def __mul__(self, other):
+            return "mul"
+        def __rmul__(self, other):
+            return "rmul"
+    a = C()
+    vereq(a*2, "mul")
+    vereq(a*2.2, "mul")
+    vereq(2*a, "rmul")
+    vereq(2.2*a, "rmul")
+
+def testipow():
+    # [SF bug 620179]: verify **= dispatches to __ipow__ without error
+    # (only "does not raise" is checked).
+    if verbose:
+        print "Testing correct invocation of __ipow__..."
+    class C(object):
+        def __ipow__(self, other):
+            pass
+    a = C()
+    a **= 2
+
+def do_this_first():
+    # Two regressions that only reproduce when this test module runs early,
+    # before certain types are fully initialized; hence the function name.
+    if verbose:
+        print "Testing SF bug 551412 ..."
+    # This dumps core when SF bug 551412 isn't fixed --
+    # but only when test_descr.py is run separately.
+    # (That can't be helped -- as soon as PyType_Ready()
+    # is called for PyLong_Type, the bug is gone.)
+    class UserLong(object):
+        def __pow__(self, *args):
+            pass
+    try:
+        pow(0L, UserLong(), 0L)
+    except:
+        pass
+
+    if verbose:
+        print "Testing SF bug 570483..."
+    # Another segfault only when run early
+    # (before PyType_Ready(tuple) is called)
+    type.mro(tuple)
+
+def test_mutable_bases():
+    # Verify assignment to a class's __bases__: legal reassignments take
+    # effect on existing instances, while layout conflicts, built-in
+    # targets, deletion, empty tuples, cycles, duplicates, and
+    # classic-only bases are all rejected with TypeError.
+    if verbose:
+        print "Testing mutable bases..."
+    # stuff that should work:
+    class C(object):
+        pass
+    class C2(object):
+        def __getattribute__(self, attr):
+            if attr == 'a':
+                return 2
+            else:
+                return super(C2, self).__getattribute__(attr)
+        def meth(self):
+            return 1
+    class D(C):
+        pass
+    class E(D):
+        pass
+    d = D()
+    e = E()
+    D.__bases__ = (C,)
+    D.__bases__ = (C2,)
+    # Pre-existing instances immediately see the new base's behavior.
+    vereq(d.meth(), 1)
+    vereq(e.meth(), 1)
+    vereq(d.a, 2)
+    vereq(e.a, 2)
+    vereq(C2.__subclasses__(), [D])
+
+    # stuff that shouldn't:
+    class L(list):
+        pass
+
+    try:
+        L.__bases__ = (dict,)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't turn list subclass into dict subclass"
+
+    try:
+        list.__bases__ = (dict,)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't be able to assign to list.__bases__"
+
+    try:
+        del D.__bases__
+    except (AttributeError, TypeError):
+        pass
+    else:
+        raise TestFailed, "shouldn't be able to delete .__bases__"
+
+    try:
+        D.__bases__ = ()
+    except TypeError, msg:
+        # An empty tuple must be rejected, and with the right message:
+        # the "classic bases" wording would be wrong for ().
+        if str(msg) == "a new-style class can't have only classic bases":
+            raise TestFailed, "wrong error message for .__bases__ = ()"
+    else:
+        raise TestFailed, "shouldn't be able to set .__bases__ to ()"
+
+    try:
+        D.__bases__ = (D,)
+    except TypeError:
+        pass
+    else:
+        # actually, we'll have crashed by here...
+        raise TestFailed, "shouldn't be able to create inheritance cycles"
+
+    try:
+        D.__bases__ = (C, C)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "didn't detect repeated base classes"
+
+    try:
+        D.__bases__ = (E,)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't be able to create inheritance cycles"
+
+    # let's throw a classic class into the mix:
+    class Classic:
+        def meth2(self):
+            return 3
+
+    D.__bases__ = (C, Classic)
+
+    vereq(d.meth2(), 3)
+    vereq(e.meth2(), 3)
+    try:
+        # C2 is no longer a base, so its __getattribute__ hook is gone.
+        d.a
+    except AttributeError:
+        pass
+    else:
+        raise TestFailed, "attribute should have vanished"
+
+    try:
+        D.__bases__ = (Classic,)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "new-style class must have a new-style base"
+
+def test_mutable_bases_with_failing_mro():
+    # Verify that when a subclass's custom mro() raises during a
+    # __bases__ reassignment, the exception propagates and every class
+    # whose MRO was already adjusted is rolled back to its previous MRO.
+    if verbose:
+        print "Testing mutable bases with failing mro..."
+    class WorkOnce(type):
+        def __new__(self, name, bases, ns):
+            self.flag = 0
+            return super(WorkOnce, self).__new__(WorkOnce, name, bases, ns)
+        def mro(self):
+            # Succeeds at class creation, fails on the next recomputation
+            # (triggered by the D.__bases__ assignment below).
+            if self.flag > 0:
+                raise RuntimeError, "bozo"
+            else:
+                self.flag += 1
+                return type.mro(self)
+
+    class WorkAlways(type):
+        def mro(self):
+            # this is here to make sure that .mro()s aren't called
+            # with an exception set (which was possible at one point).
+            # An error message will be printed in a debug build.
+            # What's a good way to test for this?
+            return type.mro(self)
+
+    class C(object):
+        pass
+
+    class C2(object):
+        pass
+
+    class D(C):
+        pass
+
+    class E(D):
+        pass
+
+    class F(D):
+        __metaclass__ = WorkOnce
+
+    class G(D):
+        __metaclass__ = WorkAlways
+
+    # Immediate subclasses have their mro's adjusted in alphabetical
+    # order, so E's will get adjusted before adjusting F's fails.  We
+    # check here that E's gets restored.
+
+    E_mro_before = E.__mro__
+    D_mro_before = D.__mro__
+
+    try:
+        D.__bases__ = (C2,)
+    except RuntimeError:
+        vereq(E.__mro__, E_mro_before)
+        vereq(D.__mro__, D_mro_before)
+    else:
+        raise TestFailed, "exception not propagated"
+
+def test_mutable_bases_catch_mro_conflict():
+    # Assigning __bases__ must run the C3 linearization check: C's new base
+    # order (B, A) conflicts with D's (A, B) in subclass E, so the
+    # assignment has to fail with TypeError.
+    if verbose:
+        print "Testing mutable bases catch mro conflict..."
+    class A(object):
+        pass
+
+    class B(object):
+        pass
+
+    class C(A, B):
+        pass
+
+    class D(A, B):
+        pass
+
+    class E(C, D):
+        pass
+
+    try:
+        C.__bases__ = (B, A)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "didn't catch MRO conflict"
+
+def mutable_names():
+    # __name__ of a new-style class is writable; __module__ must be
+    # unaffected by the rename.  A dotted name is accepted as-is.
+    if verbose:
+        print "Testing mutable names..."
+    class C(object):
+        pass
+
+    # C.__module__ could be 'test_descr' or '__main__'
+    mod = C.__module__
+
+    C.__name__ = 'D'
+    vereq((C.__module__, C.__name__), (mod, 'D'))
+
+    C.__name__ = 'D.E'
+    vereq((C.__module__, C.__name__), (mod, 'D.E'))
+
+def subclass_right_op():
+    if verbose:
+        print "Testing correct dispatch of subclass overloading __r<op>__..."
+
+    # This code tests various cases where right-dispatch of a subclass
+    # should be preferred over left-dispatch of a base class.
+
+    # Case 1: subclass of int; this tests code in abstract.c::binary_op1()
+
+    class B(int):
+        def __floordiv__(self, other):
+            return "B.__floordiv__"
+        def __rfloordiv__(self, other):
+            return "B.__rfloordiv__"
+
+    vereq(B(1) // 1, "B.__floordiv__")
+    vereq(1 // B(1), "B.__rfloordiv__")
+
+    # Case 2: subclass of object; this is just the baseline for case 3
+
+    class C(object):
+        def __floordiv__(self, other):
+            return "C.__floordiv__"
+        def __rfloordiv__(self, other):
+            return "C.__rfloordiv__"
+
+    vereq(C() // 1, "C.__floordiv__")
+    vereq(1 // C(), "C.__rfloordiv__")
+
+    # Case 3: subclass of new-style class; here it gets interesting
+    # (right operand is an instance of a subclass of the left operand's
+    # class, so its __rfloordiv__ must be tried first)
+
+    class D(C):
+        def __floordiv__(self, other):
+            return "D.__floordiv__"
+        def __rfloordiv__(self, other):
+            return "D.__rfloordiv__"
+
+    vereq(D() // C(), "D.__floordiv__")
+    vereq(C() // D(), "D.__rfloordiv__")
+
+    # Case 4: this didn't work right in 2.2.2 and 2.3a1
+    # (E inherits C's slots unchanged, so no right-dispatch preference
+    # should kick in even though E is a subclass)
+
+    class E(C):
+        pass
+
+    vereq(E.__rfloordiv__, C.__rfloordiv__)
+
+    vereq(E() // 1, "C.__floordiv__")
+    vereq(1 // E(), "C.__rfloordiv__")
+    vereq(E() // C(), "C.__floordiv__")
+    vereq(C() // E(), "C.__floordiv__") # This one would fail
+
+def dict_type_with_metaclass():
+    # A class built through a custom metaclass must still expose its
+    # __dict__ through the same proxy type as an ordinary new-style class.
+    if verbose:
+        print "Testing type of __dict__ when __metaclass__ set..."
+
+    class B(object):
+        pass
+    class M(type):
+        pass
+    class C:
+        # In 2.3a1, C.__dict__ was a real dict rather than a dict proxy
+        __metaclass__ = M
+    veris(type(C.__dict__), type(B.__dict__))
+
+def meth_class_get():
+    # Full coverage of descrobject.c::classmethod_get()
+    # Exercises dict.fromkeys (a METH_CLASS method) both through normal
+    # attribute access and by calling the raw descriptor's __get__ with
+    # valid and invalid (obj, type) combinations.
+    if verbose:
+        print "Testing __get__ method of METH_CLASS C methods..."
+    # Baseline
+    arg = [1, 2, 3]
+    res = {1: None, 2: None, 3: None}
+    vereq(dict.fromkeys(arg), res)
+    vereq({}.fromkeys(arg), res)
+    # Now get the descriptor
+    descr = dict.__dict__["fromkeys"]
+    # More baseline using the descriptor directly
+    vereq(descr.__get__(None, dict)(arg), res)
+    vereq(descr.__get__({})(arg), res)
+    # Now check various error cases
+    try:
+        descr.__get__(None, None)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't have allowed descr.__get__(None, None)"
+    try:
+        descr.__get__(42)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't have allowed descr.__get__(42)"
+    try:
+        descr.__get__(None, 42)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't have allowed descr.__get__(None, 42)"
+    try:
+        descr.__get__(None, int)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't have allowed descr.__get__(None, int)"
+
+def isinst_isclass():
+    # isinstance() must see through a __getattribute__-based proxy by
+    # consulting the proxied object's __class__, for both classic and
+    # new-style classes and their subclasses.
+    if verbose:
+        print "Testing proxy isinstance() and isclass()..."
+    class Proxy(object):
+        def __init__(self, obj):
+            self.__obj = obj
+        def __getattribute__(self, name):
+            # only the name-mangled _Proxy__obj attribute is served
+            # locally; everything else is forwarded to the target
+            if name.startswith("_Proxy__"):
+                return object.__getattribute__(self, name)
+            else:
+                return getattr(self.__obj, name)
+    # Test with a classic class
+    class C:
+        pass
+    a = C()
+    pa = Proxy(a)
+    verify(isinstance(a, C))  # Baseline
+    verify(isinstance(pa, C)) # Test
+    # Test with a classic subclass
+    class D(C):
+        pass
+    a = D()
+    pa = Proxy(a)
+    verify(isinstance(a, C))  # Baseline
+    verify(isinstance(pa, C)) # Test
+    # Test with a new-style class
+    class C(object):
+        pass
+    a = C()
+    pa = Proxy(a)
+    verify(isinstance(a, C))  # Baseline
+    verify(isinstance(pa, C)) # Test
+    # Test with a new-style subclass
+    class D(C):
+        pass
+    a = D()
+    pa = Proxy(a)
+    verify(isinstance(a, C))  # Baseline
+    verify(isinstance(pa, C)) # Test
+
+def proxysuper():
+    # Calling an unbound method extracted from C.__dict__ on a proxy object
+    # must still let super(C, self) resolve correctly, even though the
+    # proxy is not a real C instance.
+    if verbose:
+        print "Testing super() for a proxy object..."
+    class Proxy(object):
+        def __init__(self, obj):
+            self.__obj = obj
+        def __getattribute__(self, name):
+            if name.startswith("_Proxy__"):
+                return object.__getattribute__(self, name)
+            else:
+                return getattr(self.__obj, name)
+
+    class B(object):
+        def f(self):
+            return "B.f"
+
+    class C(B):
+        def f(self):
+            return super(C, self).f() + "->C.f"
+
+    obj = C()
+    p = Proxy(obj)
+    # bypass instance dispatch: fetch the raw function and call it on the proxy
+    vereq(C.__dict__["f"](p), "B.f->C.f")
+
+def carloverre():
+    if verbose:
+        print "Testing prohibition of Carlo Verre's hack..."
+    try:
+        object.__setattr__(str, "foo", 42)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "Carlo Verre __setattr__ suceeded!"
+    try:
+        object.__delattr__(str, "lower")
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "Carlo Verre __delattr__ succeeded!"
+
+def weakref_segfault():
+    # SF 742911
+    # Regression test: a __del__ that dereferences a weakref to an object
+    # already being torn down used to segfault.  Passing means simply not
+    # crashing; there is nothing to assert.
+    if verbose:
+        print "Testing weakref segfault..."
+
+    import weakref
+
+    class Provoker:
+        def __init__(self, referrent):
+            self.ref = weakref.ref(referrent)
+
+        def __del__(self):
+            # fires while the referent (o) is being destroyed
+            x = self.ref()
+
+    class Oops(object):
+        pass
+
+    o = Oops()
+    o.whatever = Provoker(o)   # cycle: o owns the Provoker that weakly refs o
+    del o
+
+def wrapper_segfault():
+    # SF 927248: deeply nested wrappers could cause stack overflow
+    # Builds a chain of one million method-wrappers; deallocating the chain
+    # (f = None) must not blow the C stack.
+    f = lambda:None
+    for i in xrange(1000000):
+        f = f.__call__
+    f = None
+
+# Fix SF #762455, segfault when sys.stdout is changed in getattr
+def filefault():
+    # print triggers sys.stdout.write; a __getattr__ that rebinds
+    # sys.stdout mid-lookup and raises used to crash the interpreter.
+    if verbose:
+        print "Testing sys.stdout is changed in getattr..."
+    import sys
+    class StdoutGuard:
+        def __getattr__(self, attr):
+            # restore the real stdout before raising so later output works
+            sys.stdout = sys.__stdout__
+            raise RuntimeError("Premature access to sys.stdout.%s" % attr)
+    sys.stdout = StdoutGuard()
+    try:
+        print "Oops!"
+    except RuntimeError:
+        pass
+
+def vicious_descriptor_nonsense():
+    # A potential segfault spotted by Thomas Wouters in mail to
+    # python-dev 2003-04-17, turned into an example & fixed by Michael
+    # Hudson just less than four months later...
+    #
+    # Evil hashes/compares equal to the string 'attr', so looking up
+    # c.attr in the instance dict triggers Evil.__eq__, which deletes the
+    # class-level descriptor C.attr while the lookup is in progress.
+    if verbose:
+        print "Testing vicious_descriptor_nonsense..."
+
+    class Evil(object):
+        def __hash__(self):
+            return hash('attr')
+        def __eq__(self, other):
+            del C.attr
+            return 0
+
+    class Descr(object):
+        def __get__(self, ob, type=None):
+            return 1
+
+    class C(object):
+        attr = Descr()
+
+    c = C()
+    c.__dict__[Evil()] = 0
+
+    vereq(c.attr, 1)
+    # this makes a crash more likely:
+    gc.collect()
+    vereq(hasattr(c, 'attr'), False)
+
+def test_init():
+    # SF 1155938
+    # __init__ returning anything but None must raise TypeError at
+    # instantiation time.
+    class Foo(object):
+        def __init__(self):
+            return 10
+    try:
+        Foo()
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "did not test __init__() for None return"
+
+def methodwrapper():
+    # <type 'method-wrapper'> did not support any reflection before 2.5
+    # Checks equality semantics, introspection attributes (__name__,
+    # __self__, __objclass__, __doc__) and hashing of bound slot wrappers.
+    if verbose:
+        print "Testing method-wrapper objects..."
+
+    l = []
+    vereq(l.__add__, l.__add__)
+    vereq(l.__add__, [].__add__)
+    verify(l.__add__ != [5].__add__)
+    verify(l.__add__ != l.__mul__)
+    verify(l.__add__.__name__ == '__add__')
+    verify(l.__add__.__self__ is l)
+    verify(l.__add__.__objclass__ is list)
+    vereq(l.__add__.__doc__, list.__add__.__doc__)
+    try:
+        # wrapper bound to an unhashable object (a list) must be unhashable
+        hash(l.__add__)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed("no TypeError from hash([].__add__)")
+
+    t = ()
+    t += (7,)
+    # wrappers bound to equal hashable objects compare and hash equal
+    vereq(t.__add__, (7,).__add__)
+    vereq(hash(t.__add__), hash((7,).__add__))
+
+def notimplemented():
+    # all binary methods should be able to return a NotImplemented
+    # For every binary operator and both class models (classic and
+    # new-style), when the special method returns NotImplemented the
+    # operation must fall through and ultimately raise TypeError.
+    if verbose:
+        print "Testing NotImplemented..."
+
+    import sys
+    import types
+    import operator
+
+    def specialmethod(self, other):
+        return NotImplemented
+
+    def check(expr, x, y):
+        # evaluate expr with x and y bound; only TypeError is acceptable
+        try:
+            exec expr in {'x': x, 'y': y, 'operator': operator}
+        except TypeError:
+            pass
+        else:
+            raise TestFailed("no TypeError from %r" % (expr,))
+
+    N1 = sys.maxint + 1L    # might trigger OverflowErrors instead of TypeErrors
+    N2 = sys.maxint         # if sizeof(int) < sizeof(long), might trigger
+                            #   ValueErrors instead of TypeErrors
+    for metaclass in [type, types.ClassType]:
+        for name, expr, iexpr in [
+                ('__add__',      'x + y',                   'x += y'),
+                ('__sub__',      'x - y',                   'x -= y'),
+                ('__mul__',      'x * y',                   'x *= y'),
+                ('__truediv__',  'operator.truediv(x, y)',  None),
+                ('__floordiv__', 'operator.floordiv(x, y)', None),
+                ('__div__',      'x / y',                   'x /= y'),
+                ('__mod__',      'x % y',                   'x %= y'),
+                ('__divmod__',   'divmod(x, y)',            None),
+                ('__pow__',      'x ** y',                  'x **= y'),
+                ('__lshift__',   'x << y',                  'x <<= y'),
+                ('__rshift__',   'x >> y',                  'x >>= y'),
+                ('__and__',      'x & y',                   'x &= y'),
+                ('__or__',       'x | y',                   'x |= y'),
+                ('__xor__',      'x ^ y',                   'x ^= y'),
+                ('__coerce__',   'coerce(x, y)',            None)]:
+            if name == '__coerce__':
+                rname = name
+            else:
+                rname = '__r' + name[2:]
+            # A defines the left method, B the reflected one; both always
+            # return NotImplemented, so every combination must fail.
+            A = metaclass('A', (), {name: specialmethod})
+            B = metaclass('B', (), {rname: specialmethod})
+            a = A()
+            b = B()
+            check(expr, a, a)
+            check(expr, a, b)
+            check(expr, b, a)
+            check(expr, b, b)
+            check(expr, a, N1)
+            check(expr, a, N2)
+            check(expr, N1, b)
+            check(expr, N2, b)
+            if iexpr:
+                check(iexpr, a, a)
+                check(iexpr, a, b)
+                check(iexpr, b, a)
+                check(iexpr, b, b)
+                check(iexpr, a, N1)
+                check(iexpr, a, N2)
+                iname = '__i' + name[2:]
+                C = metaclass('C', (), {iname: specialmethod})
+                c = C()
+                check(iexpr, c, a)
+                check(iexpr, c, b)
+                check(iexpr, c, N1)
+                check(iexpr, c, N2)
+
+def test_assign_slice():
+    # ceval.c's assign_slice used to check for
+    # tp->tp_as_sequence->sq_slice instead of
+    # tp->tp_as_sequence->sq_ass_slice
+    # i.e. a type with only __setslice__ (no __getslice__) must still
+    # accept slice assignment.
+
+    class C(object):
+        def __setslice__(self, start, stop, value):
+            self.value = value
+
+    c = C()
+    c[1:2] = 3
+    vereq(c.value, 3)
+
+def test_main():
+    # Driver: run every test function in order, counting successes and
+    # reporting failures individually instead of aborting on the first one.
+    # Commented-out entries are tests disabled for this (PyPy-modified)
+    # copy of the stdlib suite.
+    testfuncs = [
+    weakref_segfault, # Must be first, somehow
+    wrapper_segfault,
+    do_this_first,
+    class_docstrings,
+    lists,
+    dicts,
+    dict_constructor,
+    test_dir,
+    ints,
+    longs,
+    floats,
+    complexes,
+    # spamlists,
+    # spamdicts,
+    pydicts,
+    pylists,
+    metaclass,
+    pymods,
+    multi,
+    mro_disagreement,
+    diamond,
+    ex5,
+    monotonicity,
+    consistency_with_epg,
+    objects,
+    slots,
+    slotspecials,
+    dynamics,
+    errors,
+    # classmethods,
+    # classmethods_in_c,
+    staticmethods,
+    staticmethods_in_c,
+    classic,
+    compattr,
+    newslot,
+    altmro,
+    overloading,
+    methods,
+    specials,
+    weakrefs,
+    properties,
+    supers,
+    inherits,
+    keywords,
+    restricted,
+    str_subclass_as_dict_key,
+    classic_comparisons,
+    rich_comparisons,
+    coercions,
+    descrdoc,
+    setclass,
+    setdict,
+    pickles,
+    copies,
+    binopoverride,
+    subclasspropagation,
+    buffer_inherit,
+    str_of_str_subclass,
+    kwdargs,
+    recursive__call__,
+    delhook,
+    hashinherit,
+    strops,
+    deepcopyrecursive,
+    modules,
+    dictproxyiterkeys,
+    dictproxyitervalues,
+    dictproxyiteritems,
+    pickleslots,
+    funnynew,
+    imulbug,
+    docdescriptor,
+    copy_setstate,
+    slices,
+    subtype_resurrection,
+    slottrash,
+    slotmultipleinheritance,
+    testrmul,
+    testipow,
+    test_mutable_bases,
+    test_mutable_bases_with_failing_mro,
+    test_mutable_bases_catch_mro_conflict,
+    mutable_names,
+    subclass_right_op,
+    dict_type_with_metaclass,
+    meth_class_get,
+    isinst_isclass,
+    proxysuper,
+    carloverre,
+    filefault,
+    vicious_descriptor_nonsense,
+    test_init,
+    methodwrapper,
+    notimplemented,
+    test_assign_slice,
+    ]
+
+    n = len(testfuncs)
+    success = 0
+
+    for testfunc in testfuncs:
+        try:
+            print "*"*40
+            testfunc()
+        except Exception, e:
+            # KeyboardInterrupt still aborts the whole run
+            if isinstance(e, KeyboardInterrupt):
+                raise
+            print "-->", testfunc.__name__, "FAILURE(%d/%d)" % (success, n), str(e)
+        else:
+            success += 1
+            print "-->", testfunc.__name__, "OK(%d/%d)" % (success, n)
+
+    if n != success:
+        raise TestFailed, "%d/%d" % (success, n)
+    else:
+        if verbose: print "All OK"
+
+if __name__ == "__main__":
+    test_main()

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_descrtut.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_descrtut.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,493 @@
+# This contains most of the executable examples from Guido's descr
+# tutorial, once at
+#
+#     http://www.python.org/2.2/descrintro.html
+#
+# A few examples left implicit in the writeup were fleshed out, a few were
+# skipped due to lack of interest (e.g., faking super() by hand isn't
+# of much interest anymore), and a few were fiddled to make the output
+# deterministic.
+
+from test.test_support import sortdict
+import pprint
+
+class defaultdict(dict):
+    # dict subclass used by the doctests below: missing keys yield a
+    # caller-supplied default instead of raising KeyError.
+    # ('__builtins__' is excluded so the instance can serve as an exec
+    # namespace without masking the builtins lookup.)
+    def __init__(self, default=None):
+        dict.__init__(self)
+        self.default = default
+
+    def __getitem__(self, key):
+        try:
+            return dict.__getitem__(self, key)
+        except KeyError:
+            if key == '__builtins__': raise
+            return self.default
+
+    def get(self, key, *args):
+        # mirror __getitem__: substitute self.default when no explicit
+        # fallback was passed
+        if not args and key != '__builtins__':
+            args = (self.default,)
+        return dict.get(self, key, *args)
+
+    def merge(self, other):
+        # copy entries from other without overwriting existing keys
+        for key in other:
+            if key not in self:
+                self[key] = other[key]
+
+# Doctest: basic behaviour of the defaultdict subclass — type/metatype
+# introspection, item access with defaults, use as an exec namespace,
+# and dynamic instance attributes.
+test_1 = """
+
+Here's the new type at work:
+
+    >>> print defaultdict               # show our type
+    <class 'test.test_descrtut.defaultdict'>
+    >>> print type(defaultdict)         # its metatype
+    <type 'type'>
+    >>> a = defaultdict(default=0.0)    # create an instance
+    >>> print a                         # show the instance
+    {}
+    >>> print type(a)                   # show its type
+    <class 'test.test_descrtut.defaultdict'>
+    >>> print a.__class__               # show its class
+    <class 'test.test_descrtut.defaultdict'>
+    >>> print type(a) is a.__class__    # its type is its class
+    True
+    >>> a[1] = 3.25                     # modify the instance
+    >>> print a                         # show the new value
+    {1: 3.25}
+    >>> print a[1]                      # show the new item
+    3.25
+    >>> print a[0]                      # a non-existant item
+    0.0
+    >>> a.merge({1:100, 2:200})         # use a dict method
+    >>> print sortdict(a)               # show the result
+    {1: 3.25, 2: 200}
+    >>>
+
+We can also use the new type in contexts where classic only allows "real"
+dictionaries, such as the locals/globals dictionaries for the exec
+statement or the built-in function eval():
+
+    >>> def sorted(seq):
+    ...     seq.sort()
+    ...     return seq
+    >>> print sorted(a.keys())
+    [1, 2]
+    >>> exec "x = 3; print x" in a
+    3
+    >>> print sorted([str(key) for key in a.keys()])
+    ['1', '2', '__builtins__', 'x']
+    >>> print a['x']
+    3
+    >>>
+
+Now I'll show that defaultdict instances have dynamic instance variables,
+just like classic classes:
+
+    >>> a.default = -1
+    >>> print a["noway"]
+    -1
+    >>> a.default = -1000
+    >>> print a["noway"]
+    -1000
+    >>> 'default' in dir(a)
+    True
+    >>> a.x1 = 100
+    >>> a.x2 = 200
+    >>> print a.x1
+    100
+    >>> d = dir(a)
+    >>> 'default' in d and 'x1' in d and 'x2' in d
+    True
+    >>> print sortdict(a.__dict__)
+    {'default': -1000, 'x1': 100, 'x2': 200}
+    >>>
+"""
+
+class defaultdict2(dict):
+    # Same idea as defaultdict above, but with __slots__ so instances have
+    # no __dict__ — used by test_2 to show that arbitrary attribute
+    # assignment then fails.
+    __slots__ = ['default']
+
+    def __init__(self, default=None):
+        dict.__init__(self)
+        self.default = default
+
+    def __getitem__(self, key):
+        try:
+            return dict.__getitem__(self, key)
+        except KeyError:
+            return self.default
+
+    def get(self, key, *args):
+        if not args:
+            args = (self.default,)
+        return dict.get(self, key, *args)
+
+    def merge(self, other):
+        # copy entries from other without overwriting existing keys
+        for key in other:
+            if key not in self:
+                self[key] = other[key]
+
+# Doctest: __slots__ restricts instances to the declared attributes.
+test_2 = """
+
+The __slots__ declaration takes a list of instance variables, and reserves
+space for exactly these in the instance. When __slots__ is used, other
+instance variables cannot be assigned to:
+
+    >>> a = defaultdict2(default=0.0)
+    >>> a[1]
+    0.0
+    >>> a.default = -1
+    >>> a[1]
+    -1
+    >>> a.x1 = 1
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    AttributeError: 'defaultdict2' object has no attribute 'x1'
+    >>>
+
+"""
+
+# Doctest: introspection of built-in types — type(x) is x.__class__,
+# __methods__ is gone, dir() lists everything, and special methods are
+# callable directly.
+test_3 = """
+
+Introspecting instances of built-in types
+
+For instance of built-in types, x.__class__ is now the same as type(x):
+
+    >>> type([])
+    <type 'list'>
+    >>> [].__class__
+    <type 'list'>
+    >>> list
+    <type 'list'>
+    >>> isinstance([], list)
+    True
+    >>> isinstance([], dict)
+    False
+    >>> isinstance([], object)
+    True
+    >>>
+
+Under the new proposal, the __methods__ attribute no longer exists:
+
+    >>> [].__methods__
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    AttributeError: 'list' object has no attribute '__methods__'
+    >>>
+
+Instead, you can get the same information from the list type:
+
+    >>> pprint.pprint(dir(list))    # like list.__dict__.keys(), but sorted
+    ['__add__',
+     '__class__',
+     '__contains__',
+     '__delattr__',
+     '__delitem__',
+     '__doc__',
+     '__eq__',
+     '__ge__',
+     '__getattribute__',
+     '__getitem__',
+     '__gt__',
+     '__hash__',
+     '__iadd__',
+     '__imul__',
+     '__init__',
+     '__iter__',
+     '__le__',
+     '__len__',
+     '__lt__',
+     '__mul__',
+     '__ne__',
+     '__new__',
+     '__radd__',
+     '__reduce__',
+     '__reduce_ex__',
+     '__repr__',
+     '__reversed__',
+     '__rmul__',
+     '__setattr__',
+     '__setitem__',
+     '__str__',
+     'append',
+     'count',
+     'extend',
+     'index',
+     'insert',
+     'pop',
+     'remove',
+     'reverse',
+     'sort']
+
+The new introspection API gives more information than the old one:  in
+addition to the regular methods, it also shows the methods that are
+normally invoked through special notations, e.g. __iadd__ (+=), __len__
+(len), __ne__ (!=). You can invoke any method from this list directly:
+
+    >>> a = ['tic', 'tac']
+    >>> list.__len__(a)          # same as len(a)
+    2
+    >>> a.__len__()              # ditto
+    2
+    >>> list.append(a, 'toe')    # same as a.append('toe')
+    >>> a
+    ['tic', 'tac', 'toe']
+    >>>
+
+This is just like it is for user-defined classes.
+"""
+
+# Doctest: staticmethod and classmethod semantics, including how the
+# implicit class argument of a classmethod follows the call site's class.
+test_4 = """
+
+Static methods and class methods
+
+The new introspection API makes it possible to add static methods and class
+methods. Static methods are easy to describe: they behave pretty much like
+static methods in C++ or Java. Here's an example:
+
+    >>> class C:
+    ...
+    ...     @staticmethod
+    ...     def foo(x, y):
+    ...         print "staticmethod", x, y
+
+    >>> C.foo(1, 2)
+    staticmethod 1 2
+    >>> c = C()
+    >>> c.foo(1, 2)
+    staticmethod 1 2
+
+Class methods use a similar pattern to declare methods that receive an
+implicit first argument that is the *class* for which they are invoked.
+
+    >>> class C:
+    ...     @classmethod
+    ...     def foo(cls, y):
+    ...         print "classmethod", cls, y
+
+    >>> C.foo(1)
+    classmethod test.test_descrtut.C 1
+    >>> c = C()
+    >>> c.foo(1)
+    classmethod test.test_descrtut.C 1
+
+    >>> class D(C):
+    ...     pass
+
+    >>> D.foo(1)
+    classmethod test.test_descrtut.D 1
+    >>> d = D()
+    >>> d.foo(1)
+    classmethod test.test_descrtut.D 1
+
+This prints "classmethod __main__.D 1" both times; in other words, the
+class passed as the first argument of foo() is the class involved in the
+call, not the class involved in the definition of foo().
+
+But notice this:
+
+    >>> class E(C):
+    ...     @classmethod
+    ...     def foo(cls, y): # override C.foo
+    ...         print "E.foo() called"
+    ...         C.foo(y)
+
+    >>> E.foo(1)
+    E.foo() called
+    classmethod test.test_descrtut.C 1
+    >>> e = E()
+    >>> e.foo(1)
+    E.foo() called
+    classmethod test.test_descrtut.C 1
+
+In this example, the call to C.foo() from E.foo() will see class C as its
+first argument, not class E. This is to be expected, since the call
+specifies the class C. But it stresses the difference between these class
+methods and methods defined in metaclasses (where an upcall to a metamethod
+would pass the target class as an explicit first argument).
+"""
+
+# Doctest: a hand-written property descriptor (via __get__/__set__), then
+# the same behaviour using the builtin property type.
+test_5 = """
+
+Attributes defined by get/set methods
+
+
+    >>> class property(object):
+    ...
+    ...     def __init__(self, get, set=None):
+    ...         self.__get = get
+    ...         self.__set = set
+    ...
+    ...     def __get__(self, inst, type=None):
+    ...         return self.__get(inst)
+    ...
+    ...     def __set__(self, inst, value):
+    ...         if self.__set is None:
+    ...             raise AttributeError, "this attribute is read-only"
+    ...         return self.__set(inst, value)
+
+Now let's define a class with an attribute x defined by a pair of methods,
+getx() and and setx():
+
+    >>> class C(object):
+    ...
+    ...     def __init__(self):
+    ...         self.__x = 0
+    ...
+    ...     def getx(self):
+    ...         return self.__x
+    ...
+    ...     def setx(self, x):
+    ...         if x < 0: x = 0
+    ...         self.__x = x
+    ...
+    ...     x = property(getx, setx)
+
+Here's a small demonstration:
+
+    >>> a = C()
+    >>> a.x = 10
+    >>> print a.x
+    10
+    >>> a.x = -10
+    >>> print a.x
+    0
+    >>>
+
+Hmm -- property is builtin now, so let's try it that way too.
+
+    >>> del property  # unmask the builtin
+    >>> property
+    <type 'property'>
+
+    >>> class C(object):
+    ...     def __init__(self):
+    ...         self.__x = 0
+    ...     def getx(self):
+    ...         return self.__x
+    ...     def setx(self, x):
+    ...         if x < 0: x = 0
+    ...         self.__x = x
+    ...     x = property(getx, setx)
+
+
+    >>> a = C()
+    >>> a.x = 10
+    >>> print a.x
+    10
+    >>> a.x = -10
+    >>> print a.x
+    0
+    >>>
+"""
+
+# Doctest: classic classes use depth-first MRO (A.save wins), new-style
+# classes use the C3 linearization (C.save wins).
+test_6 = """
+
+Method resolution order
+
+This example is implicit in the writeup.
+
+>>> class A:    # classic class
+...     def save(self):
+...         print "called A.save()"
+>>> class B(A):
+...     pass
+>>> class C(A):
+...     def save(self):
+...         print "called C.save()"
+>>> class D(B, C):
+...     pass
+
+>>> D().save()
+called A.save()
+
+>>> class A(object):  # new class
+...     def save(self):
+...         print "called A.save()"
+>>> class B(A):
+...     pass
+>>> class C(A):
+...     def save(self):
+...         print "called C.save()"
+>>> class D(B, C):
+...     pass
+
+>>> D().save()
+called C.save()
+"""
+
+class A(object):
+    # Root of the cooperative-super diamond used by test_7.
+    def m(self):
+        return "A"
+
+class B(A):
+    # Prepends "B" and delegates up the MRO via super().
+    def m(self):
+        return "B" + super(B, self).m()
+
+class C(A):
+    # Prepends "C" and delegates up the MRO via super().
+    def m(self):
+        return "C" + super(C, self).m()
+
+class D(C, B):
+    # MRO is D, C, B, A, so the chained supers spell out "DCBA".
+    def m(self):
+        return "D" + super(D, self).m()
+
+
+# Doctest: exercises the A/B/C/D classes defined above.
+test_7 = """
+
+Cooperative methods and "super"
+
+>>> print D().m() # "DCBA"
+DCBA
+"""
+
+# Doctest: an unbound method must be called with an instance of its own
+# class (sibling classes don't qualify), unlike a base-class upcall.
+test_8 = """
+
+Backwards incompatibilities
+
+>>> class A:
+...     def foo(self):
+...         print "called A.foo()"
+
+>>> class B(A):
+...     pass
+
+>>> class C(A):
+...     def foo(self):
+...         B.foo(self)
+
+>>> C().foo()
+Traceback (most recent call last):
+ ...
+TypeError: unbound method foo() must be called with B instance as first argument (got C instance instead)
+
+>>> class C(A):
+...     def foo(self):
+...         A.foo(self)
+>>> C().foo()
+called A.foo()
+"""
+
+# Registry consumed by doctest.testmod() / run_doctest: maps test names
+# to the doctest strings defined above.
+__test__ = {"tut1": test_1,
+            "tut2": test_2,
+            "tut3": test_3,
+            "tut4": test_4,
+            "tut5": test_5,
+            "tut6": test_6,
+            "tut7": test_7,
+            "tut8": test_8}
+
+# Magic test name that regrtest.py invokes *after* importing this module.
+# This worms around a bootstrap problem.
+# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
+# so this works as expected in both ways of running regrtest.
+def test_main(verbose=None):
+    # Obscure:  import this module as test.test_descrtut instead of as
+    # plain test_descrtut because the name of this module works its way
+    # into the doctest examples, and unless the full test.test_descrtut
+    # business is used the name can change depending on how the test is
+    # invoked.
+    from test import test_support, test_descrtut
+    test_support.run_doctest(test_descrtut, verbose)
+
+# This part isn't needed for regrtest, but for running the test directly.
+if __name__ == "__main__":
+    test_main(1)

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_dict.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_dict.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,477 @@
+import unittest
+from test import test_support
+
+import sys, UserDict, cStringIO
+
+
+class DictTest(unittest.TestCase):
+    def test_constructor(self):
+        # calling built-in types without argument must return empty
+        self.assertEqual(dict(), {})
+        self.assert_(dict() is not {})
+
+    def test_bool(self):
+        self.assert_(not {})
+        self.assert_({1: 2})
+        self.assert_(bool({}) is False)
+        self.assert_(bool({1: 2}) is True)
+
+    def test_keys(self):
+        d = {}
+        self.assertEqual(d.keys(), [])
+        d = {'a': 1, 'b': 2}
+        k = d.keys()
+        self.assert_(d.has_key('a'))
+        self.assert_(d.has_key('b'))
+
+        self.assertRaises(TypeError, d.keys, None)
+
+    def test_values(self):
+        d = {}
+        self.assertEqual(d.values(), [])
+        d = {1:2}
+        self.assertEqual(d.values(), [2])
+
+        self.assertRaises(TypeError, d.values, None)
+
+    def test_items(self):
+        d = {}
+        self.assertEqual(d.items(), [])
+
+        d = {1:2}
+        self.assertEqual(d.items(), [(1, 2)])
+
+        self.assertRaises(TypeError, d.items, None)
+
+    def test_has_key(self):
+        d = {}
+        self.assert_(not d.has_key('a'))
+        d = {'a': 1, 'b': 2}
+        k = d.keys()
+        k.sort()
+        self.assertEqual(k, ['a', 'b'])
+
+        self.assertRaises(TypeError, d.has_key)
+
+    def test_contains(self):
+        d = {}
+        self.assert_(not ('a' in d))
+        self.assert_('a' not in d)
+        d = {'a': 1, 'b': 2}
+        self.assert_('a' in d)
+        self.assert_('b' in d)
+        self.assert_('c' not in d)
+
+        self.assertRaises(TypeError, d.__contains__)
+
+    def test_len(self):
+        d = {}
+        self.assertEqual(len(d), 0)
+        d = {'a': 1, 'b': 2}
+        self.assertEqual(len(d), 2)
+
+    def test_getitem(self):
+        d = {'a': 1, 'b': 2}
+        self.assertEqual(d['a'], 1)
+        self.assertEqual(d['b'], 2)
+        d['c'] = 3
+        d['a'] = 4
+        self.assertEqual(d['c'], 3)
+        self.assertEqual(d['a'], 4)
+        del d['b']
+        self.assertEqual(d, {'a': 4, 'c': 3})
+
+        self.assertRaises(TypeError, d.__getitem__)
+
+        class BadEq(object):
+            def __eq__(self, other):
+                raise Exc()
+
+        d = {}
+        d[BadEq()] = 42
+        self.assertRaises(KeyError, d.__getitem__, 23)
+
+        class Exc(Exception): pass
+
+        class BadHash(object):
+            fail = False
+            def __hash__(self):
+                if self.fail:
+                    raise Exc()
+                else:
+                    return 42
+
+        x = BadHash()
+        d[x] = 42
+        x.fail = True
+        self.assertRaises(Exc, d.__getitem__, x)
+
+    def test_clear(self):
+        d = {1:1, 2:2, 3:3}
+        d.clear()
+        self.assertEqual(d, {})
+
+        self.assertRaises(TypeError, d.clear, None)
+
+    def test_update(self):
+        d = {}
+        d.update({1:100})
+        d.update({2:20})
+        d.update({1:1, 2:2, 3:3})
+        self.assertEqual(d, {1:1, 2:2, 3:3})
+
+        d.update()
+        self.assertEqual(d, {1:1, 2:2, 3:3})
+
+        self.assertRaises((TypeError, AttributeError), d.update, None)
+
+        class SimpleUserDict:
+            def __init__(self):
+                self.d = {1:1, 2:2, 3:3}
+            def keys(self):
+                return self.d.keys()
+            def __getitem__(self, i):
+                return self.d[i]
+        d.clear()
+        d.update(SimpleUserDict())
+        self.assertEqual(d, {1:1, 2:2, 3:3})
+
+        class Exc(Exception): pass
+
+        d.clear()
+        class FailingUserDict:
+            def keys(self):
+                raise Exc
+        self.assertRaises(Exc, d.update, FailingUserDict())
+
+        class FailingUserDict:
+            def keys(self):
+                class BogonIter:
+                    def __init__(self):
+                        self.i = 1
+                    def __iter__(self):
+                        return self
+                    def next(self):
+                        if self.i:
+                            self.i = 0
+                            return 'a'
+                        raise Exc
+                return BogonIter()
+            def __getitem__(self, key):
+                return key
+        self.assertRaises(Exc, d.update, FailingUserDict())
+
+        class FailingUserDict:
+            def keys(self):
+                class BogonIter:
+                    def __init__(self):
+                        self.i = ord('a')
+                    def __iter__(self):
+                        return self
+                    def next(self):
+                        if self.i <= ord('z'):
+                            rtn = chr(self.i)
+                            self.i += 1
+                            return rtn
+                        raise StopIteration
+                return BogonIter()
+            def __getitem__(self, key):
+                raise Exc
+        self.assertRaises(Exc, d.update, FailingUserDict())
+
+        class badseq(object):
+            def __iter__(self):
+                return self
+            def next(self):
+                raise Exc()
+
+        self.assertRaises(Exc, {}.update, badseq())
+
+        self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
+
+    def test_fromkeys(self):
+        self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
+        d = {}
+        self.assert_(not(d.fromkeys('abc') is d))
+        self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
+        self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
+        self.assertEqual(d.fromkeys([]), {})
+        def g():
+            yield 1
+        self.assertEqual(d.fromkeys(g()), {1:None})
+        self.assertRaises(TypeError, {}.fromkeys, 3)
+        class dictlike(dict): pass
+        self.assertEqual(dictlike.fromkeys('a'), {'a':None})
+        self.assertEqual(dictlike().fromkeys('a'), {'a':None})
+        self.assert_(type(dictlike.fromkeys('a')) is dictlike)
+        self.assert_(type(dictlike().fromkeys('a')) is dictlike)
+        class mydict(dict):
+            def __new__(cls):
+                return UserDict.UserDict()
+        ud = mydict.fromkeys('ab')
+        self.assertEqual(ud, {'a':None, 'b':None})
+        self.assert_(isinstance(ud, UserDict.UserDict))
+        self.assertRaises(TypeError, dict.fromkeys)
+
+        class Exc(Exception): pass
+
+        class baddict1(dict):
+            def __init__(self):
+                raise Exc()
+
+        self.assertRaises(Exc, baddict1.fromkeys, [1])
+
+        class BadSeq(object):
+            def __iter__(self):
+                return self
+            def next(self):
+                raise Exc()
+
+        self.assertRaises(Exc, dict.fromkeys, BadSeq())
+
+        class baddict2(dict):
+            def __setitem__(self, key, value):
+                raise Exc()
+
+        self.assertRaises(Exc, baddict2.fromkeys, [1])
+
+    def test_copy(self):
+        d = {1:1, 2:2, 3:3}
+        self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
+        self.assertEqual({}.copy(), {})
+        self.assertRaises(TypeError, d.copy, None)
+
+    def test_get(self):
+        d = {}
+        self.assert_(d.get('c') is None)
+        self.assertEqual(d.get('c', 3), 3)
+        d = {'a' : 1, 'b' : 2}
+        self.assert_(d.get('c') is None)
+        self.assertEqual(d.get('c', 3), 3)
+        self.assertEqual(d.get('a'), 1)
+        self.assertEqual(d.get('a', 3), 1)
+        self.assertRaises(TypeError, d.get)
+        self.assertRaises(TypeError, d.get, None, None, None)
+
+    def test_setdefault(self):
+        # dict.setdefault()
+        d = {}
+        self.assert_(d.setdefault('key0') is None)
+        d.setdefault('key0', [])
+        self.assert_(d.setdefault('key0') is None)
+        d.setdefault('key', []).append(3)
+        self.assertEqual(d['key'][0], 3)
+        d.setdefault('key', []).append(4)
+        self.assertEqual(len(d['key']), 2)
+        self.assertRaises(TypeError, d.setdefault)
+
+        class Exc(Exception): pass
+
+        class BadHash(object):
+            fail = False
+            def __hash__(self):
+                if self.fail:
+                    raise Exc()
+                else:
+                    return 42
+
+        x = BadHash()
+        d[x] = 42
+        x.fail = True
+        self.assertRaises(Exc, d.setdefault, x, [])
+
+    def test_popitem(self):
+        # dict.popitem()
+        for copymode in -1, +1:
+            # -1: b has same structure as a
+            # +1: b is a.copy()
+            for log2size in range(4): # XXX 12 too large for PyPy
+                size = 2**log2size
+                a = {}
+                b = {}
+                for i in range(size):
+                    a[repr(i)] = i
+                    if copymode < 0:
+                        b[repr(i)] = i
+                if copymode > 0:
+                    b = a.copy()
+                for i in range(size):
+                    ka, va = ta = a.popitem()
+                    self.assertEqual(va, int(ka))
+                    kb, vb = tb = b.popitem()
+                    self.assertEqual(vb, int(kb))
+                    self.assert_(not(copymode < 0 and ta != tb))
+                self.assert_(not a)
+                self.assert_(not b)
+
+        d = {}
+        self.assertRaises(KeyError, d.popitem)
+
+    def test_pop(self):
+        # Tests for pop with specified key
+        d = {}
+        k, v = 'abc', 'def'
+        d[k] = v
+        self.assertRaises(KeyError, d.pop, 'ghi')
+
+        self.assertEqual(d.pop(k), v)
+        self.assertEqual(len(d), 0)
+
+        self.assertRaises(KeyError, d.pop, k)
+
+        # verify longs/ints get same value when key > 32 bits (for 64-bit archs)
+        # see SF bug #689659
+        x = 4503599627370496L
+        y = 4503599627370496
+        h = {x: 'anything', y: 'something else'}
+        self.assertEqual(h[x], h[y])
+
+        self.assertEqual(d.pop(k, v), v)
+        d[k] = v
+        self.assertEqual(d.pop(k, 1), v)
+
+        self.assertRaises(TypeError, d.pop)
+
+        class Exc(Exception): pass
+
+        class BadHash(object):
+            fail = False
+            def __hash__(self):
+                if self.fail:
+                    raise Exc()
+                else:
+                    return 42
+
+        x = BadHash()
+        d[x] = 42
+        x.fail = True
+        self.assertRaises(Exc, d.pop, x)
+
+    def test_mutatingiteration(self):
+        d = {}
+        d[1] = 1
+        try:
+            for i in d:
+                d[i+1] = 1
+        except RuntimeError:
+            pass
+        else:
+            self.fail("changing dict size during iteration doesn't raise Error")
+
+    def test_repr(self):
+        d = {}
+        self.assertEqual(repr(d), '{}')
+        d[1] = 2
+        self.assertEqual(repr(d), '{1: 2}')
+        d = {}
+        d[1] = d
+        self.assertEqual(repr(d), '{1: {...}}')
+
+        class Exc(Exception): pass
+
+        class BadRepr(object):
+            def __repr__(self):
+                raise Exc()
+
+        d = {1: BadRepr()}
+        self.assertRaises(Exc, repr, d)
+
+    def test_le(self):
+        self.assert_(not ({} < {}))
+        self.assert_(not ({1: 2} < {1L: 2L}))
+
+        class Exc(Exception): pass
+
+        class BadCmp(object):
+            def __cmp__(self, other):
+                raise Exc()
+
+        d1 = {BadCmp(): 1}
+        d2 = {1: 1}
+        try:
+            d1 < d2
+        except Exc:
+            pass
+        else:
+            self.fail("< didn't raise Exc")
+
+    def test_missing(self):
+        # Make sure dict doesn't have a __missing__ method
+        self.assertEqual(hasattr(dict, "__missing__"), False)
+        self.assertEqual(hasattr({}, "__missing__"), False)
+        # Test several cases:
+        # (D) subclass defines __missing__ method returning a value
+        # (E) subclass defines __missing__ method raising RuntimeError
+        # (F) subclass sets __missing__ instance variable (no effect)
+        # (G) subclass doesn't define __missing__ at all
+        class D(dict):
+            def __missing__(self, key):
+                return 42
+        d = D({1: 2, 3: 4})
+        self.assertEqual(d[1], 2)
+        self.assertEqual(d[3], 4)
+        self.assert_(2 not in d)
+        self.assert_(2 not in d.keys())
+        self.assertEqual(d[2], 42)
+        class E(dict):
+            def __missing__(self, key):
+                raise RuntimeError(key)
+        e = E()
+        try:
+            e[42]
+        except RuntimeError, err:
+            self.assertEqual(err.args, (42,))
+        else:
+            self.fail("e[42] didn't raise RuntimeError")
+        class F(dict):
+            def __init__(self):
+                # An instance variable __missing__ should have no effect
+                self.__missing__ = lambda key: None
+        f = F()
+        try:
+            f[42]
+        except KeyError, err:
+            self.assertEqual(err.args, (42,))
+        else:
+            self.fail("f[42] didn't raise KeyError")
+        class G(dict):
+            pass
+        g = G()
+        try:
+            g[42]
+        except KeyError, err:
+            self.assertEqual(err.args, (42,))
+        else:
+            self.fail("g[42] didn't raise KeyError")
+
+    def test_tuple_keyerror(self):
+        # SF #1576657
+        d = {}
+        try:
+            d[(1,)]
+        except KeyError, e:
+            self.assertEqual(e.args, ((1,),))
+        else:
+            self.fail("missing KeyError")
+
+
+from test import mapping_tests
+
+class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
+    type2test = dict
+
+class Dict(dict):
+    pass
+
+class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
+    type2test = Dict
+
+def test_main():
+    test_support.run_unittest(
+        DictTest,
+        GeneralMappingTests,
+        SubclassMappingTests,
+    )
+
+if __name__ == "__main__":
+    test_main()

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_enumerate.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_enumerate.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,219 @@
+import unittest
+import sys
+
+from test import test_support
+
+class G:
+    'Sequence using __getitem__'
+    def __init__(self, seqn):
+        self.seqn = seqn
+    def __getitem__(self, i):
+        return self.seqn[i]
+
+class I:
+    'Sequence using iterator protocol'
+    def __init__(self, seqn):
+        self.seqn = seqn
+        self.i = 0
+    def __iter__(self):
+        return self
+    def next(self):
+        if self.i >= len(self.seqn): raise StopIteration
+        v = self.seqn[self.i]
+        self.i += 1
+        return v
+
+class Ig:
+    'Sequence using iterator protocol defined with a generator'
+    def __init__(self, seqn):
+        self.seqn = seqn
+        self.i = 0
+    def __iter__(self):
+        for val in self.seqn:
+            yield val
+
+class X:
+    'Missing __getitem__ and __iter__'
+    def __init__(self, seqn):
+        self.seqn = seqn
+        self.i = 0
+    def next(self):
+        if self.i >= len(self.seqn): raise StopIteration
+        v = self.seqn[self.i]
+        self.i += 1
+        return v
+
+class E:
+    'Test propagation of exceptions'
+    def __init__(self, seqn):
+        self.seqn = seqn
+        self.i = 0
+    def __iter__(self):
+        return self
+    def next(self):
+        3 // 0
+
+class N:
+    'Iterator missing next()'
+    def __init__(self, seqn):
+        self.seqn = seqn
+        self.i = 0
+    def __iter__(self):
+        return self
+
+class EnumerateTestCase(unittest.TestCase):
+
+    enum = enumerate
+    seq, res = 'abc', [(0,'a'), (1,'b'), (2,'c')]
+
+    def test_basicfunction(self):
+        self.assertEqual(type(self.enum(self.seq)), self.enum)
+        e = self.enum(self.seq)
+        self.assertEqual(iter(e), e)
+        self.assertEqual(list(self.enum(self.seq)), self.res)
+        self.enum.__doc__
+
+    def test_getitemseqn(self):
+        self.assertEqual(list(self.enum(G(self.seq))), self.res)
+        e = self.enum(G(''))
+        self.assertRaises(StopIteration, e.next)
+
+    def test_iteratorseqn(self):
+        self.assertEqual(list(self.enum(I(self.seq))), self.res)
+        e = self.enum(I(''))
+        self.assertRaises(StopIteration, e.next)
+
+    def test_iteratorgenerator(self):
+        self.assertEqual(list(self.enum(Ig(self.seq))), self.res)
+        e = self.enum(Ig(''))
+        self.assertRaises(StopIteration, e.next)
+
+    def test_noniterable(self):
+        self.assertRaises(TypeError, self.enum, X(self.seq))
+
+    def test_illformediterable(self):
+        self.assertRaises(TypeError, list, self.enum(N(self.seq)))
+
+    def test_exception_propagation(self):
+        self.assertRaises(ZeroDivisionError, list, self.enum(E(self.seq)))
+
+    def test_argumentcheck(self):
+        self.assertRaises(TypeError, self.enum) # no arguments
+        self.assertRaises(TypeError, self.enum, 1) # wrong type (not iterable)
+        self.assertRaises(TypeError, self.enum, 'abc', 2) # too many arguments
+
+    # Don't test this in PyPy, since the tuple can't be reused
+    def DONOT_test_tuple_reuse(self):
+        # Tests an implementation detail where tuple is reused
+        # whenever nothing else holds a reference to it
+        self.assertEqual(len(set(map(id, list(enumerate(self.seq))))), len(self.seq))
+        self.assertEqual(len(set(map(id, enumerate(self.seq)))), min(1,len(self.seq)))
+
+class MyEnum(enumerate):
+    pass
+
+class SubclassTestCase(EnumerateTestCase):
+
+    enum = MyEnum
+
+class TestEmpty(EnumerateTestCase):
+
+    seq, res = '', []
+
+class TestBig(EnumerateTestCase):
+    ## original test (takes too long in PyPy):
+    #seq = range(10,20000, 2)
+    #res = zip(range(20000), seq)
+
+    seq = range(10, 200, 2)
+    res = zip(range(200), seq)
+
+class TestReversed(unittest.TestCase):
+
+    def test_simple(self):
+        class A:
+            def __getitem__(self, i):
+                if i < 5:
+                    return str(i)
+                raise StopIteration
+            def __len__(self):
+                return 5
+        for data in 'abc', range(5), tuple(enumerate('abc')), A(), xrange(1,17,5):
+            self.assertEqual(list(data)[::-1], list(reversed(data)))
+        self.assertRaises(TypeError, reversed, {})
+
+# Implementation detail
+#    def test_xrange_optimization(self):
+#        x = xrange(1)
+#        self.assertEqual(type(reversed(x)), type(iter(x)))
+
+    def test_len(self):
+        # This is an implementation detail, not an interface requirement
+        from test.test_iterlen import len
+        for s in ('hello', tuple('hello'), list('hello'), xrange(5)):
+            self.assertEqual(len(reversed(s)), len(s))
+            r = reversed(s)
+            list(r)
+            self.assertEqual(len(r), 0)
+        class SeqWithWeirdLen:
+            called = False
+            def __len__(self):
+                if not self.called:
+                    self.called = True
+                    return 10
+                raise ZeroDivisionError
+            def __getitem__(self, index):
+                return index
+        r = reversed(SeqWithWeirdLen())
+        self.assertRaises(ZeroDivisionError, len, r)
+
+
+    def test_gc(self):
+        class Seq:
+            def __len__(self):
+                return 10
+            def __getitem__(self, index):
+                return index
+        s = Seq()
+        r = reversed(s)
+        s.r = r
+
+    def test_args(self):
+        self.assertRaises(TypeError, reversed)
+        self.assertRaises(TypeError, reversed, [], 'extra')
+
+    def test_bug1229429(self):
+        # this bug was never in reversed, it was in
+        # PyObject_CallMethod, and reversed_new calls that sometimes.
+        if not hasattr(sys, "getrefcount"):
+            return
+        def f():
+            pass
+        r = f.__reversed__ = object()
+        rc = sys.getrefcount(r)
+        for i in range(10):
+            try:
+                reversed(f)
+            except TypeError:
+                pass
+            else:
+                self.fail("non-callable __reversed__ didn't raise!")
+        self.assertEqual(rc, sys.getrefcount(r))
+
+
+def test_main(verbose=None):
+    testclasses = (EnumerateTestCase, SubclassTestCase, TestEmpty, TestBig,
+                   TestReversed)
+    test_support.run_unittest(*testclasses)
+
+    # verify reference counting
+    import sys
+    if verbose and hasattr(sys, "gettotalrefcount"):
+        counts = [None] * 5
+        for i in xrange(len(counts)):
+            test_support.run_unittest(*testclasses)
+            counts[i] = sys.gettotalrefcount()
+        print counts
+
+if __name__ == "__main__":
+    test_main(verbose=True)

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_exceptions.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_exceptions.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,349 @@
+# Python test set -- part 5, built-in exceptions
+
+import os
+import sys
+import unittest
+import warnings
+import pickle, cPickle
+
+from test.test_support import TESTFN, unlink, run_unittest
+
+# XXX This is not really enough, each *operation* should be tested!
+
+class ExceptionTests(unittest.TestCase):
+
+    def testReload(self):
+        # Reloading the built-in exceptions module failed prior to Py2.2, while it
+        # should act the same as reloading built-in sys.
+        try:
+            import exceptions
+            reload(exceptions)
+        except ImportError, e:
+            self.fail("reloading exceptions: %s" % e)
+
+    def raise_catch(self, exc, excname):
+        try:
+            raise exc, "spam"
+        except exc, err:
+            buf1 = str(err)
+        try:
+            raise exc("spam")
+        except exc, err:
+            buf2 = str(err)
+        self.assertEquals(buf1, buf2)
+        self.assertEquals(exc.__name__, excname)
+
+    def testRaising(self):
+        self.raise_catch(AttributeError, "AttributeError")
+        self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
+
+        self.raise_catch(EOFError, "EOFError")
+        fp = open(TESTFN, 'w')
+        fp.close()
+        fp = open(TESTFN, 'r')
+        savestdin = sys.stdin
+        try:
+            try:
+                sys.stdin = fp
+                x = raw_input()
+            except EOFError:
+                pass
+        finally:
+            sys.stdin = savestdin
+            fp.close()
+            unlink(TESTFN)
+
+        self.raise_catch(IOError, "IOError")
+        self.assertRaises(IOError, open, 'this file does not exist', 'r')
+
+        self.raise_catch(ImportError, "ImportError")
+        self.assertRaises(ImportError, __import__, "undefined_module")
+
+        self.raise_catch(IndexError, "IndexError")
+        x = []
+        self.assertRaises(IndexError, x.__getitem__, 10)
+
+        self.raise_catch(KeyError, "KeyError")
+        x = {}
+        self.assertRaises(KeyError, x.__getitem__, 'key')
+
+        self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
+
+        self.raise_catch(MemoryError, "MemoryError")
+
+        self.raise_catch(NameError, "NameError")
+        try: x = undefined_variable
+        except NameError: pass
+
+        self.raise_catch(OverflowError, "OverflowError")
+        x = 1
+        for dummy in range(128):
+            x += x  # this simply shouldn't blow up
+
+        self.raise_catch(RuntimeError, "RuntimeError")
+
+        self.raise_catch(SyntaxError, "SyntaxError")
+        try: exec '/\n'
+        except SyntaxError: pass
+
+        self.raise_catch(IndentationError, "IndentationError")
+
+        self.raise_catch(TabError, "TabError")
+        # can only be tested under -tt, and is the only test for -tt
+        #try: compile("try:\n\t1/0\n    \t1/0\nfinally:\n pass\n", '<string>', 'exec')
+        #except TabError: pass
+        #else: self.fail("TabError not raised")
+
+        self.raise_catch(SystemError, "SystemError")
+
+        self.raise_catch(SystemExit, "SystemExit")
+        self.assertRaises(SystemExit, sys.exit, 0)
+
+        self.raise_catch(TypeError, "TypeError")
+        try: [] + ()
+        except TypeError: pass
+
+        self.raise_catch(ValueError, "ValueError")
+        self.assertRaises(ValueError, chr, 10000)
+
+        self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
+        try: x = 1/0
+        except ZeroDivisionError: pass
+
+        self.raise_catch(Exception, "Exception")
+        try: x = 1/0
+        except Exception, e: pass
+
+    def testSyntaxErrorMessage(self):
+        # make sure the right exception message is raised for each of
+        # these code fragments
+
+        def ckmsg(src, msg):
+            try:
+                compile(src, '<fragment>', 'exec')
+            except SyntaxError, e:
+                if e.msg != msg:
+                    self.fail("expected %s, got %s" % (msg, e.msg))
+            else:
+                self.fail("failed to get expected SyntaxError")
+
+        s = '''while 1:
+            try:
+                pass
+            finally:
+                continue'''
+
+        if not sys.platform.startswith('java'):
+            ckmsg(s, "'continue' not supported inside 'finally' clause")
+
+        s = '''if 1:
+        try:
+            continue
+        except:
+            pass'''
+
+        ckmsg(s, "'continue' not properly in loop")
+        ckmsg("continue\n", "'continue' not properly in loop")
+
+    def testSettingException(self):
+        # test that setting an exception at the C level works even if the
+        # exception object can't be constructed.
+
+        class BadException:
+            def __init__(self_):
+                raise RuntimeError, "can't instantiate BadException"
+
+        def test_capi1():
+            import _testcapi
+            try:
+                _testcapi.raise_exception(BadException, 1)
+            except TypeError, err:
+                exc, err, tb = sys.exc_info()
+                co = tb.tb_frame.f_code
+                self.assertEquals(co.co_name, "test_capi1")
+                self.assert_(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
+            else:
+                self.fail("Expected exception")
+
+        def test_capi2():
+            import _testcapi
+            try:
+                _testcapi.raise_exception(BadException, 0)
+            except RuntimeError, err:
+                exc, err, tb = sys.exc_info()
+                co = tb.tb_frame.f_code
+                self.assertEquals(co.co_name, "__init__")
+                self.assert_(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
+                co2 = tb.tb_frame.f_back.f_code
+                self.assertEquals(co2.co_name, "test_capi2")
+            else:
+                self.fail("Expected exception")
+
+        try:
+            import _testcapi
+        except ImportError:
+            pass
+        else:
+            test_capi1()
+            test_capi2()
+
+    def test_WindowsError(self):
+        try:
+            WindowsError
+        except NameError:
+            pass
+        else:
+            self.failUnlessEqual(str(WindowsError(1001)),
+                                 "1001")
+            self.failUnlessEqual(str(WindowsError(1001, "message")),
+                                 "[Error 1001] message")
+            self.failUnlessEqual(WindowsError(1001, "message").errno, 22)
+            self.failUnlessEqual(WindowsError(1001, "message").winerror, 1001)
+
+    def testAttributes(self):
+        # test that exception attributes are happy
+
+        exceptionList = [
+            (BaseException, (), {'message' : '', 'args' : ()}),
+            (BaseException, (1, ), {'message' : 1, 'args' : (1,)}),
+            (BaseException, ('foo',),
+                {'message' : 'foo', 'args' : ('foo',)}),
+            (BaseException, ('foo', 1),
+                {'message' : '', 'args' : ('foo', 1)}),
+            (SystemExit, ('foo',),
+                {'message' : 'foo', 'args' : ('foo',), 'code' : 'foo'}),
+            (IOError, ('foo',),
+                {'message' : 'foo', 'args' : ('foo',), 'filename' : None,
+                 'errno' : None, 'strerror' : None}),
+            (IOError, ('foo', 'bar'),
+                {'message' : '', 'args' : ('foo', 'bar'), 'filename' : None,
+                 'errno' : 'foo', 'strerror' : 'bar'}),
+            (IOError, ('foo', 'bar', 'baz'),
+                {'message' : '', 'args' : ('foo', 'bar'), 'filename' : 'baz',
+                 'errno' : 'foo', 'strerror' : 'bar'}),
+            (IOError, ('foo', 'bar', 'baz', 'quux'),
+                {'message' : '', 'args' : ('foo', 'bar', 'baz', 'quux')}),
+            (EnvironmentError, ('errnoStr', 'strErrorStr', 'filenameStr'),
+                {'message' : '', 'args' : ('errnoStr', 'strErrorStr'),
+                 'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
+                 'filename' : 'filenameStr'}),
+            (EnvironmentError, (1, 'strErrorStr', 'filenameStr'),
+                {'message' : '', 'args' : (1, 'strErrorStr'), 'errno' : 1,
+                 'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}),
+            (SyntaxError, ('msgStr',),
+                {'message' : 'msgStr', 'args' : ('msgStr',), 'text' : None,
+                 'print_file_and_line' : None, 'msg' : 'msgStr',
+                 'filename' : None, 'lineno' : None, 'offset' : None}),
+            (SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
+                           'textStr')),
+                {'message' : '', 'offset' : 'offsetStr', 'text' : 'textStr',
+                 'args' : ('msgStr', ('filenameStr', 'linenoStr',
+                                      'offsetStr', 'textStr')),
+                 'print_file_and_line' : None, 'msg' : 'msgStr',
+                 'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
+            (SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
+                           'textStr', 'print_file_and_lineStr'),
+                {'message' : '', 'text' : None,
+                 'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
+                           'textStr', 'print_file_and_lineStr'),
+                 'print_file_and_line' : None, 'msg' : 'msgStr',
+                 'filename' : None, 'lineno' : None, 'offset' : None}),
+            (UnicodeError, (), {'message' : '', 'args' : (),}),
+            (UnicodeEncodeError, ('ascii', u'a', 0, 1, 'ordinal not in range'),
+                {'message' : '', 'args' : ('ascii', u'a', 0, 1,
+                                           'ordinal not in range'),
+                 'encoding' : 'ascii', 'object' : u'a',
+                 'start' : 0, 'reason' : 'ordinal not in range'}),
+            (UnicodeDecodeError, ('ascii', '\xff', 0, 1, 'ordinal not in range'),
+                {'message' : '', 'args' : ('ascii', '\xff', 0, 1,
+                                           'ordinal not in range'),
+                 'encoding' : 'ascii', 'object' : '\xff',
+                 'start' : 0, 'reason' : 'ordinal not in range'}),
+            (UnicodeTranslateError, (u"\u3042", 0, 1, "ouch"),
+                {'message' : '', 'args' : (u'\u3042', 0, 1, 'ouch'),
+                 'object' : u'\u3042', 'reason' : 'ouch',
+                 'start' : 0, 'end' : 1}),
+        ]
+        try:
+            exceptionList.append(
+                (WindowsError, (1, 'strErrorStr', 'filenameStr'),
+                    {'message' : '', 'args' : (1, 'strErrorStr'),
+                     'strerror' : 'strErrorStr', 'winerror' : 1,
+                     'errno' : 22, 'filename' : 'filenameStr'})
+            )
+        except NameError:
+            pass
+
+        for exc, args, expected in exceptionList:
+            try:
+                raise exc(*args)
+            except BaseException, e:
+                if type(e) is not exc:
+                    raise
+                # Verify module name
+                self.assertEquals(type(e).__module__, 'exceptions')
+                # Verify no ref leaks in Exc_str()
+                s = str(e)
+                for checkArgName in expected:
+                    self.assertEquals(repr(getattr(e, checkArgName)),
+                                      repr(expected[checkArgName]),
+                                      'exception "%s", attribute "%s"' %
+                                       (repr(e), checkArgName))
+
+                # test for pickling support
+                for p in pickle, cPickle:
+                    for protocol in range(p.HIGHEST_PROTOCOL + 1):
+                        new = p.loads(p.dumps(e, protocol))
+                        for checkArgName in expected:
+                            got = repr(getattr(new, checkArgName))
+                            want = repr(expected[checkArgName])
+                            self.assertEquals(got, want,
+                                              'pickled "%r", attribute "%s' %
+                                              (e, checkArgName))
+
+    def testSlicing(self):
+        # Test that you can slice an exception directly instead of requiring
+        # going through the 'args' attribute.
+        args = (1, 2, 3)
+        exc = BaseException(*args)
+        self.failUnlessEqual(exc[:], args)
+
+    def testKeywordArgs(self):
+        # test that builtin exception don't take keyword args,
+        # but user-defined subclasses can if they want
+        self.assertRaises(TypeError, BaseException, a=1)
+
+        class DerivedException(BaseException):
+            def __init__(self, fancy_arg):
+                BaseException.__init__(self)
+                self.fancy_arg = fancy_arg
+
+        x = DerivedException(fancy_arg=42)
+        self.assertEquals(x.fancy_arg, 42)
+
+    def testInfiniteRecursion(self):
+        def f():
+            return f()
+        self.assertRaises(RuntimeError, f)
+
+        def g():
+            try:
+                return g()
+            except ValueError:
+                return -1
+        self.assertRaises(RuntimeError, g)
+
+    def testUnicodeStrUsage(self):
+        # Make sure both instances and classes have a str and unicode
+        # representation.
+        self.failUnless(str(Exception))
+        self.failUnless(unicode(Exception))
+        self.failUnless(str(Exception('a')))
+        self.failUnless(unicode(Exception(u'a')))
+
+
+def test_main():
+    run_unittest(ExceptionTests)
+
+if __name__ == '__main__':
+    test_main()

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_file.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_file.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,343 @@
+import sys
+import os
+import gc
+import unittest
+from array import array
+from weakref import proxy
+
+from test.test_support import TESTFN, findfile, run_unittest
+from UserList import UserList
+
+class AutoFileTests(unittest.TestCase):
+    # file tests for which a test file is automatically set up
+
+    def setUp(self):
+        self.f = open(TESTFN, 'wb')
+
+    def tearDown(self):
+        if self.f:
+            self.f.close()
+        os.remove(TESTFN)
+
+    def testWeakRefs(self):
+        # verify weak references
+        p = proxy(self.f)
+        p.write('teststring')
+        self.assertEquals(self.f.name, p.name)
+        self.f.close()
+        self.f = None
+        self.assertRaises(ReferenceError, getattr, p, 'name')
+
+    def testAttributes(self):
+        # verify expected attributes exist
+        f = self.f
+        softspace = f.softspace
+        f.name     # merely shouldn't blow up
+        f.mode     # ditto
+        f.closed   # ditto
+
+        # verify softspace is writable
+        f.softspace = softspace    # merely shouldn't blow up
+
+        # verify the others aren't
+        for attr in 'name', 'mode', 'closed':
+            self.assertRaises((AttributeError, TypeError), setattr, f, attr, 'oops')
+
+    def testReadinto(self):
+        # verify readinto
+        self.f.write('12')
+        self.f.close()
+        a = array('c', 'x'*10)
+        self.f = open(TESTFN, 'rb')
+        n = self.f.readinto(a)
+        self.assertEquals('12', a.tostring()[:n])
+
+    def testWritelinesUserList(self):
+        # verify writelines with instance sequence
+        # XXX writeslines on UserList crashes interpreter
+        #l = UserList(['1', '2'])
+        l = ['1', '2']
+        self.f.writelines(l)
+        self.f.close()
+        self.f = open(TESTFN, 'rb')
+        buf = self.f.read()
+        self.assertEquals(buf, '12')
+
+    def testWritelinesIntegers(self):
+        # verify writelines with integers
+        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
+
+    def testWritelinesIntegersUserList(self):
+        # verify writelines with integers in UserList
+        # l = UserList([1,2,3])
+        l = [1,2,3]
+
+        self.assertRaises(TypeError, self.f.writelines, l)
+
+    def testWritelinesNonString(self):
+        # verify writelines with non-string object
+        class NonString:
+            pass
+
+#        self.assertRaises(TypeError, self.f.writelines,
+#                          [NonString(), NonString()])
+
+    def testRepr(self):
+        # verify repr works
+        self.assert_(repr(self.f).startswith("<open file '" + TESTFN))
+
+    def testErrors(self):
+        f = self.f
+        self.assertEquals(f.name, TESTFN)
+        self.assert_(not f.isatty())
+        self.assert_(not f.closed)
+
+        self.assertRaises(TypeError, f.readinto, "")
+        f.close()
+        self.assert_(f.closed)
+
+    def testMethods(self):
+        a = array('c', 'x'*10)
+        methods = {'fileno': (), 'flush': (), 'isatty': (), 'next': (),
+                'read': (), 'readinto': (a,), 'readline': (), 'readlines': (),
+                'seek': (0,), 'tell': (), 'truncate': (), 'write': ('',),
+                'writelines': ([],), 'xreadlines': (), '__iter__': () }
+
+        if sys.platform.startswith('atheos'):
+            del methods['truncate']
+
+        # __exit__ should close the file
+        self.f.__exit__(None, None, None)
+        self.assert_(self.f.closed)
+
+        for methodname, args in methods.items():
+            method = getattr(self.f, methodname)
+            # should raise on closed file
+            self.assertRaises(ValueError, method, *args)
+
+        # file is closed, __exit__ shouldn't do anything
+        self.assertEquals(self.f.__exit__(None, None, None), None)
+        # it must also return None if an exception was given
+        try:
+            1/0
+        except:
+            self.assertEquals(self.f.__exit__(*sys.exc_info()), None)
+
+
+class OtherFileTests(unittest.TestCase):
+
+    def testModeStrings(self):
+        # check invalid mode strings
+        for mode in ("", "aU", "wU+"):
+            try:
+                f = open(TESTFN, mode)
+            except ValueError:
+                pass
+            else:
+                f.close()
+                self.fail('%r is an invalid file mode' % mode)
+
+    def testStdin(self):
+        # This causes the interpreter to exit on OSF1 v5.1.
+        if sys.platform != 'osf1V5':
+            self.assertRaises(IOError, sys.stdin.seek, -1)
+        else:
+            print >>sys.__stdout__, (
+                '  Skipping sys.stdin.seek(-1), it may crash the interpreter.'
+                ' Test manually.')
+        self.assertRaises(IOError, sys.stdin.truncate)
+
+    def testUnicodeOpen(self):
+        # verify repr works for unicode too
+        f = open(unicode(TESTFN), "w")
+        self.assert_(repr(f).startswith("<open file u'" + TESTFN))
+        f.close()
+        os.unlink(TESTFN)
+
+    def testBadModeArgument(self):
+        # verify that we get a sensible error message for bad mode argument
+        bad_mode = "qwerty"
+        try:
+            f = open(TESTFN, bad_mode)
+        except ValueError, msg:
+            if msg[0] != 0:
+                s = str(msg)
+                if s.find(TESTFN) != -1 or s.find(bad_mode) == -1:
+                    self.fail("bad error message for invalid mode: %s" % s)
+            # if msg[0] == 0, we're probably on Windows where there may be
+            # no obvious way to discover why open() failed.
+        else:
+            f.close()
+            self.fail("no error for invalid mode: %s" % bad_mode)
+
+    def testSetBufferSize(self):
+        # make sure that explicitly setting the buffer size doesn't cause
+        # misbehaviour especially with repeated close() calls
+        for s in (-1, 0, 1, 512):
+            try:
+                f = open(TESTFN, 'w', s)
+                f.write(str(s))
+                f.close()
+                f.close()
+                f = open(TESTFN, 'r', s)
+                d = int(f.read())
+                f.close()
+                f.close()
+            except IOError, msg:
+                self.fail('error setting buffer size %d: %s' % (s, str(msg)))
+            self.assertEquals(d, s)
+
+    def testTruncateOnWindows(self):
+        os.unlink(TESTFN)
+
+        def bug801631():
+            # SF bug <http://www.python.org/sf/801631>
+            # "file.truncate fault on windows"
+            f = open(TESTFN, 'wb')
+            f.write('12345678901')   # 11 bytes
+            f.close()
+
+            f = open(TESTFN,'rb+')
+            data = f.read(5)
+            if data != '12345':
+                self.fail("Read on file opened for update failed %r" % data)
+            if f.tell() != 5:
+                self.fail("File pos after read wrong %d" % f.tell())
+
+            f.truncate()
+            if f.tell() != 5:
+                self.fail("File pos after ftruncate wrong %d" % f.tell())
+
+            f.close()
+            size = os.path.getsize(TESTFN)
+            if size != 5:
+                self.fail("File size after ftruncate wrong %d" % size)
+
+        try:
+            bug801631()
+        finally:
+            os.unlink(TESTFN)
+
+    def testIteration(self):
+        # Test the complex interaction when mixing file-iteration and the
+        # various read* methods. Ostensibly, the mixture could just be tested
+        # to work when it should work according to the Python language,
+        # instead of fail when it should fail according to the current CPython
+        # implementation.  People don't always program Python the way they
+        # should, though, and the implementation might change in subtle ways,
+        # so we explicitly test for errors, too; the test will just have to
+        # be updated when the implementation changes.
+        dataoffset = 16384
+        filler = "ham\n"
+        assert not dataoffset % len(filler), \
+            "dataoffset must be multiple of len(filler)"
+        nchunks = dataoffset // len(filler)
+        testlines = [
+            "spam, spam and eggs\n",
+            "eggs, spam, ham and spam\n",
+            "saussages, spam, spam and eggs\n",
+            "spam, ham, spam and eggs\n",
+            "spam, spam, spam, spam, spam, ham, spam\n",
+            "wonderful spaaaaaam.\n"
+        ]
+        methods = [("readline", ()), ("read", ()), ("readlines", ()),
+                   ("readinto", (array("c", " "*100),))]
+
+        try:
+            # Prepare the testfile
+            bag = open(TESTFN, "w")
+            bag.write(filler * nchunks)
+            bag.writelines(testlines)
+            bag.close()
+            # Test for appropriate errors mixing read* and iteration
+            for methodname, args in methods:
+                f = open(TESTFN)
+                if f.next() != filler:
+                    self.fail("Broken testfile")
+                meth = getattr(f, methodname)
+                try:
+                    meth(*args)
+                except ValueError:
+                    pass
+                else:
+                    self.fail("%s%r after next() didn't raise ValueError" %
+                                     (methodname, args))
+                f.close()
+
+            # Test to see if harmless (by accident) mixing of read* and
+            # iteration still works. This depends on the size of the internal
+            # iteration buffer (currently 8192,) but we can test it in a
+            # flexible manner.  Each line in the bag o' ham is 4 bytes
+            # ("h", "a", "m", "\n"), so 4096 lines of that should get us
+            # exactly on the buffer boundary for any power-of-2 buffersize
+            # between 4 and 16384 (inclusive).
+            f = open(TESTFN)
+            for i in range(nchunks):
+                f.next()
+            testline = testlines.pop(0)
+            try:
+                line = f.readline()
+            except ValueError:
+                self.fail("readline() after next() with supposedly empty "
+                          "iteration-buffer failed anyway")
+            if line != testline:
+                self.fail("readline() after next() with empty buffer "
+                          "failed. Got %r, expected %r" % (line, testline))
+            testline = testlines.pop(0)
+            buf = array("c", "\x00" * len(testline))
+            try:
+                f.readinto(buf)
+            except ValueError:
+                self.fail("readinto() after next() with supposedly empty "
+                          "iteration-buffer failed anyway")
+            line = buf.tostring()
+            if line != testline:
+                self.fail("readinto() after next() with empty buffer "
+                          "failed. Got %r, expected %r" % (line, testline))
+
+            testline = testlines.pop(0)
+            try:
+                line = f.read(len(testline))
+            except ValueError:
+                self.fail("read() after next() with supposedly empty "
+                          "iteration-buffer failed anyway")
+            if line != testline:
+                self.fail("read() after next() with empty buffer "
+                          "failed. Got %r, expected %r" % (line, testline))
+            try:
+                lines = f.readlines()
+            except ValueError:
+                self.fail("readlines() after next() with supposedly empty "
+                          "iteration-buffer failed anyway")
+            if lines != testlines:
+                self.fail("readlines() after next() with empty buffer "
+                          "failed. Got %r, expected %r" % (line, testline))
+            # Reading after iteration hit EOF shouldn't hurt either
+            f = open(TESTFN)
+            try:
+                for line in f:
+                    pass
+                try:
+                    f.readline()
+                    f.readinto(buf)
+                    f.read()
+                    f.readlines()
+                except ValueError:
+                    self.fail("read* failed after next() consumed file")
+            finally:
+                f.close()
+        finally:
+            os.unlink(TESTFN)
+
+
+def test_main():
+    # Historically, these tests have been sloppy about removing TESTFN.
+    # So get rid of it no matter what.
+    try:
+        run_unittest(AutoFileTests, OtherFileTests)
+    finally:
+        if os.path.exists(TESTFN):
+            os.unlink(TESTFN)
+
+if __name__ == '__main__':
+    test_main()

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_format.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_format.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,250 @@
+from test.test_support import verbose, have_unicode, TestFailed
+import sys
+
+# test string formatting operator (I am not sure if this is being tested
+# elsewhere but, surely, some of the given cases are *not* tested because
+# they crash python)
+# test on unicode strings as well
+
+overflowok = 1
+
+def testformat(formatstr, args, output=None):
+    if verbose:
+        if output:
+            print "%s %% %s =? %s ..." %\
+                (repr(formatstr), repr(args), repr(output)),
+        else:
+            print "%s %% %s works? ..." % (repr(formatstr), repr(args)),
+    try:
+        result = formatstr % args
+    except (OverflowError, MemoryError):
+        if not overflowok:
+            raise
+        if verbose:
+            print 'overflow (this is fine)'
+    else:
+        if output and result != output:
+            if verbose:
+                print 'no'
+            print "%s %% %s == %s != %s" %\
+                (repr(formatstr), repr(args), repr(result), repr(output))
+        else:
+            if verbose:
+                print 'yes'
+
+def testboth(formatstr, *args):
+    testformat(formatstr, *args)
+    if have_unicode:
+        testformat(unicode(formatstr), *args)
+
+
+testboth("%.1d", (1,), "1")
+testboth("%.*d", (sys.maxint,1))  # expect overflow
+testboth("%.100d", (1,), '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
+testboth("%#.117x", (1,), '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
+testboth("%#.118x", (1,), '0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
+
+testboth("%f", (1.0,), "1.000000")
+# these are trying to test the limits of the internal magic-number-length
+# formatting buffer, if that number changes then these tests are less
+# effective
+testboth("%#.*g", (109, -1.e+49/3.))
+testboth("%#.*g", (110, -1.e+49/3.))
+testboth("%#.*g", (110, -1.e+100/3.))
+
+# test some ridiculously large precision, expect overflow
+#Too slow on PyPy
+#testboth('%12.*f', (123456, 1.0))
+
+# Formatting of long integers. Overflow is not ok
+overflowok = 0
+testboth("%x", 10L, "a")
+testboth("%x", 100000000000L, "174876e800")
+testboth("%o", 10L, "12")
+testboth("%o", 100000000000L, "1351035564000")
+testboth("%d", 10L, "10")
+testboth("%d", 100000000000L, "100000000000")
+
+big = 123456789012345678901234567890L
+testboth("%d", big, "123456789012345678901234567890")
+testboth("%d", -big, "-123456789012345678901234567890")
+testboth("%5d", -big, "-123456789012345678901234567890")
+testboth("%31d", -big, "-123456789012345678901234567890")
+testboth("%32d", -big, " -123456789012345678901234567890")
+testboth("%-32d", -big, "-123456789012345678901234567890 ")
+testboth("%032d", -big, "-0123456789012345678901234567890")
+testboth("%-032d", -big, "-123456789012345678901234567890 ")
+testboth("%034d", -big, "-000123456789012345678901234567890")
+testboth("%034d", big, "0000123456789012345678901234567890")
+testboth("%0+34d", big, "+000123456789012345678901234567890")
+testboth("%+34d", big, "   +123456789012345678901234567890")
+testboth("%34d", big, "    123456789012345678901234567890")
+testboth("%.2d", big, "123456789012345678901234567890")
+testboth("%.30d", big, "123456789012345678901234567890")
+testboth("%.31d", big, "0123456789012345678901234567890")
+testboth("%32.31d", big, " 0123456789012345678901234567890")
+
+big = 0x1234567890abcdef12345L  # 21 hex digits
+testboth("%x", big, "1234567890abcdef12345")
+testboth("%x", -big, "-1234567890abcdef12345")
+testboth("%5x", -big, "-1234567890abcdef12345")
+testboth("%22x", -big, "-1234567890abcdef12345")
+testboth("%23x", -big, " -1234567890abcdef12345")
+testboth("%-23x", -big, "-1234567890abcdef12345 ")
+testboth("%023x", -big, "-01234567890abcdef12345")
+testboth("%-023x", -big, "-1234567890abcdef12345 ")
+testboth("%025x", -big, "-0001234567890abcdef12345")
+testboth("%025x", big, "00001234567890abcdef12345")
+testboth("%0+25x", big, "+0001234567890abcdef12345")
+testboth("%+25x", big, "   +1234567890abcdef12345")
+testboth("%25x", big, "    1234567890abcdef12345")
+testboth("%.2x", big, "1234567890abcdef12345")
+testboth("%.21x", big, "1234567890abcdef12345")
+testboth("%.22x", big, "01234567890abcdef12345")
+testboth("%23.22x", big, " 01234567890abcdef12345")
+testboth("%-23.22x", big, "01234567890abcdef12345 ")
+testboth("%X", big, "1234567890ABCDEF12345")
+testboth("%#X", big, "0X1234567890ABCDEF12345")
+testboth("%#x", big, "0x1234567890abcdef12345")
+testboth("%#x", -big, "-0x1234567890abcdef12345")
+testboth("%#.23x", -big, "-0x001234567890abcdef12345")
+testboth("%#+.23x", big, "+0x001234567890abcdef12345")
+testboth("%# .23x", big, " 0x001234567890abcdef12345")
+testboth("%#+.23X", big, "+0X001234567890ABCDEF12345")
+testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345")
+testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
+testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
+testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
+# next one gets two leading zeroes from precision, and another from the
+# 0 flag and the width
+testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
+# same, except no 0 flag
+testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
+
+big = 012345670123456701234567012345670L  # 32 octal digits
+testboth("%o", big, "12345670123456701234567012345670")
+testboth("%o", -big, "-12345670123456701234567012345670")
+testboth("%5o", -big, "-12345670123456701234567012345670")
+testboth("%33o", -big, "-12345670123456701234567012345670")
+testboth("%34o", -big, " -12345670123456701234567012345670")
+testboth("%-34o", -big, "-12345670123456701234567012345670 ")
+testboth("%034o", -big, "-012345670123456701234567012345670")
+testboth("%-034o", -big, "-12345670123456701234567012345670 ")
+testboth("%036o", -big, "-00012345670123456701234567012345670")
+testboth("%036o", big, "000012345670123456701234567012345670")
+testboth("%0+36o", big, "+00012345670123456701234567012345670")
+testboth("%+36o", big, "   +12345670123456701234567012345670")
+testboth("%36o", big, "    12345670123456701234567012345670")
+testboth("%.2o", big, "12345670123456701234567012345670")
+testboth("%.32o", big, "12345670123456701234567012345670")
+testboth("%.33o", big, "012345670123456701234567012345670")
+testboth("%34.33o", big, " 012345670123456701234567012345670")
+testboth("%-34.33o", big, "012345670123456701234567012345670 ")
+testboth("%o", big, "12345670123456701234567012345670")
+testboth("%#o", big, "012345670123456701234567012345670")
+testboth("%#o", -big, "-012345670123456701234567012345670")
+testboth("%#.34o", -big, "-0012345670123456701234567012345670")
+testboth("%#+.34o", big, "+0012345670123456701234567012345670")
+testboth("%# .34o", big, " 0012345670123456701234567012345670")
+testboth("%#+.34o", big, "+0012345670123456701234567012345670")
+testboth("%#-+.34o", big, "+0012345670123456701234567012345670")
+testboth("%#-+37.34o", big, "+0012345670123456701234567012345670  ")
+testboth("%#+37.34o", big, "  +0012345670123456701234567012345670")
+# next one gets one leading zero from precision
+testboth("%.33o", big, "012345670123456701234567012345670")
+# base marker shouldn't change that, since "0" is redundant
+testboth("%#.33o", big, "012345670123456701234567012345670")
+# but reduce precision, and base marker should add a zero
+testboth("%#.32o", big, "012345670123456701234567012345670")
+# one leading zero from precision, and another from "0" flag & width
+testboth("%034.33o", big, "0012345670123456701234567012345670")
+# base marker shouldn't change that
+testboth("%0#34.33o", big, "0012345670123456701234567012345670")
+
+# Some small ints, in both Python int and long flavors).
+testboth("%d", 42, "42")
+testboth("%d", -42, "-42")
+testboth("%d", 42L, "42")
+testboth("%d", -42L, "-42")
+testboth("%#x", 1, "0x1")
+testboth("%#x", 1L, "0x1")
+testboth("%#X", 1, "0X1")
+testboth("%#X", 1L, "0X1")
+testboth("%#o", 1, "01")
+testboth("%#o", 1L, "01")
+testboth("%#o", 0, "0")
+testboth("%#o", 0L, "0")
+testboth("%o", 0, "0")
+testboth("%o", 0L, "0")
+testboth("%d", 0, "0")
+testboth("%d", 0L, "0")
+testboth("%#x", 0, "0x0")
+testboth("%#x", 0L, "0x0")
+testboth("%#X", 0, "0X0")
+testboth("%#X", 0L, "0X0")
+
+testboth("%x", 0x42, "42")
+testboth("%x", -0x42, "-42")
+testboth("%x", 0x42L, "42")
+testboth("%x", -0x42L, "-42")
+
+testboth("%o", 042, "42")
+testboth("%o", -042, "-42")
+testboth("%o", 042L, "42")
+testboth("%o", -042L, "-42")
+
+# Test exception for unknown format characters
+if verbose:
+    print 'Testing exceptions'
+
+def test_exc(formatstr, args, exception, excmsg):
+    try:
+        testformat(formatstr, args)
+    except exception, exc:
+        if str(exc) == excmsg:
+            if verbose:
+                print "yes"
+        else:
+            if verbose: print 'no'
+            print 'Unexpected ', exception, ':', repr(str(exc))
+    except:
+        if verbose: print 'no'
+        print 'Unexpected exception'
+        raise
+    else:
+        raise TestFailed, 'did not get expected exception: %s' % excmsg
+
+test_exc('abc %a', 1, ValueError,
+         "unsupported format character 'a' (0x61) at index 5")
+if have_unicode:
+    test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
+             "unsupported format character '?' (0x3000) at index 5")
+
+test_exc('%d', '1', TypeError, "int argument required")
+test_exc('%g', '1', TypeError, "float argument required")
+test_exc('no format', '1', TypeError,
+         "not all arguments converted during string formatting")
+test_exc('no format', u'1', TypeError,
+         "not all arguments converted during string formatting")
+test_exc(u'no format', '1', TypeError,
+         "not all arguments converted during string formatting")
+test_exc(u'no format', u'1', TypeError,
+         "not all arguments converted during string formatting")
+
+class Foobar(long):
+    def __oct__(self):
+        # Returning a non-string should not blow up.
+        return self + 1
+
+test_exc('%o', Foobar(), TypeError,
+         "expected string or Unicode object, long found")
+
+if sys.maxint == 2**31-1:
+    # crashes 2.2.1 and earlier:
+    try:
+        "%*d"%(sys.maxint, -127)
+    except (MemoryError, OverflowError):
+        pass  # CPython raises MemoryError, but both CPython and PyPy raise
+              # OverflowError for string concatenation
+    else:
+        raise TestFailed, '"%*d"%(sys.maxint, -127) should fail'

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_funcattrs.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_funcattrs.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,402 @@
+from test.test_support import verbose, TestFailed, verify
+import types
+
+class F:
+    def a(self):
+        pass
+
+def b():
+    'my docstring'
+    pass
+
+# __module__ is a special attribute
+verify(b.__module__ == __name__)
+verify(verify.__module__ == "test.test_support")
+
+# setting attributes on functions
+try:
+    b.publish
+except AttributeError: pass
+else: raise TestFailed, 'expected AttributeError'
+
+if b.__dict__ <> {}:
+    raise TestFailed, 'expected unassigned func.__dict__ to be {}'
+
+b.publish = 1
+if b.publish <> 1:
+    raise TestFailed, 'function attribute not set to expected value'
+
+docstring = 'its docstring'
+b.__doc__ = docstring
+if b.__doc__ <> docstring:
+    raise TestFailed, 'problem with setting __doc__ attribute'
+
+if 'publish' not in dir(b):
+    raise TestFailed, 'attribute not in dir()'
+
+try:
+    del b.__dict__
+except (AttributeError, TypeError): pass
+else: raise TestFailed, 'expected AttributeError or TypeError'
+
+b.publish = 1
+try:
+    b.__dict__ = None
+except TypeError: pass
+else: raise TestFailed, 'func.__dict__ = None expected TypeError'
+
+d = {'hello': 'world'}
+b.__dict__ = d
+if b.func_dict is not d:
+    raise TestFailed, 'func.__dict__ assignment to dictionary failed'
+if b.hello <> 'world':
+    raise TestFailed, 'attribute after func.__dict__ assignment failed'
+
+f1 = F()
+f2 = F()
+
+try:
+    F.a.publish
+except AttributeError: pass
+else: raise TestFailed, 'expected AttributeError'
+
+try:
+    f1.a.publish
+except AttributeError: pass
+else: raise TestFailed, 'expected AttributeError'
+
+# In Python 2.1 beta 1, we disallowed setting attributes on unbound methods
+# (it was already disallowed on bound methods).  See the PEP for details.
+try:
+    F.a.publish = 1
+except (AttributeError, TypeError): pass
+else: raise TestFailed, 'expected AttributeError or TypeError'
+
+# But setting it explicitly on the underlying function object is okay.
+F.a.im_func.publish = 1
+
+if F.a.publish <> 1:
+    raise TestFailed, 'unbound method attribute not set to expected value'
+
+if f1.a.publish <> 1:
+    raise TestFailed, 'bound method attribute access did not work'
+
+if f2.a.publish <> 1:
+    raise TestFailed, 'bound method attribute access did not work'
+
+if 'publish' not in dir(F.a):
+    raise TestFailed, 'attribute not in dir()'
+
+try:
+    f1.a.publish = 0
+except (AttributeError, TypeError): pass
+else: raise TestFailed, 'expected AttributeError or TypeError'
+
+# See the comment above about the change in semantics for Python 2.1b1
+try:
+    F.a.myclass = F
+except (AttributeError, TypeError): pass
+else: raise TestFailed, 'expected AttributeError or TypeError'
+
+F.a.im_func.myclass = F
+
+f1.a.myclass
+f2.a.myclass
+f1.a.myclass
+F.a.myclass
+
+if f1.a.myclass is not f2.a.myclass or \
+       f1.a.myclass is not F.a.myclass:
+    raise TestFailed, 'attributes were not the same'
+
+# try setting __dict__
+try:
+    F.a.__dict__ = (1, 2, 3)
+except (AttributeError, TypeError): pass
+else: raise TestFailed, 'expected TypeError or AttributeError'
+
+F.a.im_func.__dict__ = {'one': 11, 'two': 22, 'three': 33}
+
+if f1.a.two <> 22:
+    raise TestFailed, 'setting __dict__'
+
+from UserDict import UserDict
+d = UserDict({'four': 44, 'five': 55})
+
+try:
+    F.a.__dict__ = d
+except (AttributeError, TypeError): pass
+else: raise TestFailed
+
+if f2.a.one <> f1.a.one <> F.a.one <> 11:
+    raise TestFailed
+
+# im_func may not be a Python method!
+import new
+F.id = new.instancemethod(id, None, F)
+
+eff = F()
+if eff.id() <> id(eff):
+    raise TestFailed
+
+try:
+    F.id.foo
+except AttributeError: pass
+else: raise TestFailed
+
+try:
+    F.id.foo = 12
+except (AttributeError, TypeError): pass
+else: raise TestFailed
+
+try:
+    F.id.foo
+except AttributeError: pass
+else: raise TestFailed
+
+try:
+    eff.id.foo
+except AttributeError: pass
+else: raise TestFailed
+
+try:
+    eff.id.foo = 12
+except (AttributeError, TypeError): pass
+else: raise TestFailed
+
+try:
+    eff.id.foo
+except AttributeError: pass
+else: raise TestFailed
+
+# Regression test for a crash in pre-2.1a1
+def another():
+    pass
+
+try:
+    del another.__dict__
+except (TypeError, AttributeError): pass
+else: raise TestFailed, 'del another.__dict__ did not fail'
+
+try:
+    del another.func_dict
+except (TypeError, AttributeError): pass
+else: raise TestFailed, 'del another.func_dict did not fail'
+
+try:
+    another.func_dict = None
+except TypeError: pass
+else: raise TestFailed
+
+try:
+    del another.bar
+except AttributeError: pass
+else: raise TestFailed
+
+# This isn't specifically related to function attributes, but it does test a
+# core dump regression in funcobject.c
+del another.func_defaults
+
+def foo():
+    pass
+
+def bar():
+    pass
+
+def temp():
+    print 1
+
+if foo==bar:
+    raise TestFailed
+
+d={}
+d[foo] = 1
+
+foo.func_code = temp.func_code
+
+d[foo]
+
+# Test all predefined function attributes systematically
+
+def cantset(obj, name, value, exception=(AttributeError, TypeError)):
+    verify(hasattr(obj, name)) # Otherwise it's probably a typo
+    try:
+        setattr(obj, name, value)
+    except exception:
+        pass
+    else:
+        raise TestFailed, "shouldn't be able to set %s to %r" % (name, value)
+    try:
+        delattr(obj, name)
+    except (AttributeError, TypeError):
+        pass
+    else:
+        raise TestFailed, "shouldn't be able to del %s" % name
+
+def test_func_closure():
+    a = 12
+    def f(): print a
+    c = f.func_closure
+    verify(isinstance(c, tuple))
+    verify(len(c) == 1)
+    verify(c[0].__class__.__name__ == "cell") # don't have a type object handy
+    cantset(f, "func_closure", c)
+
+def test_func_doc():
+    def f(): pass
+    verify(f.__doc__ is None)
+    verify(f.func_doc is None)
+    f.__doc__ = "hello"
+    verify(f.__doc__ == "hello")
+    verify(f.func_doc == "hello")
+    del f.__doc__
+    verify(f.__doc__ is None)
+    verify(f.func_doc is None)
+    f.func_doc = "world"
+    verify(f.__doc__ == "world")
+    verify(f.func_doc == "world")
+    del f.func_doc
+    verify(f.func_doc is None)
+    verify(f.__doc__ is None)
+
+def test_func_globals():
+    def f(): pass
+    verify(f.func_globals is globals())
+    cantset(f, "func_globals", globals())
+
+def test_func_name():
+    def f(): pass
+    verify(f.__name__ == "f")
+    verify(f.func_name == "f")
+    f.__name__ = "g"
+    verify(f.__name__ == "g")
+    verify(f.func_name == "g")
+    f.func_name = "h"
+    verify(f.__name__ == "h")
+    verify(f.func_name == "h")
+    cantset(f, "func_globals", 1)
+    cantset(f, "__name__", 1)
+    # test that you can access func.__name__ in restricted mode
+    s = """def f(): pass\nf.__name__"""
+    exec s in {'__builtins__':{}}
+
+
+def test_func_code():
+    a = b = 24
+    def f(): pass
+    def g(): print 12
+    def f1(): print a
+    def g1(): print b
+    def f2(): print a, b
+    verify(type(f.func_code) is types.CodeType)
+    f.func_code = g.func_code
+    cantset(f, "func_code", None)
+    # can't change the number of free vars
+    cantset(f,  "func_code", f1.func_code, exception=ValueError)
+    cantset(f1, "func_code",  f.func_code, exception=ValueError)
+    cantset(f1, "func_code", f2.func_code, exception=ValueError)
+    f1.func_code = g1.func_code
+
+def test_func_defaults():
+    def f(a, b): return (a, b)
+    verify(f.func_defaults is None)
+    f.func_defaults = (1, 2)
+    verify(f.func_defaults == (1, 2))
+    verify(f(10) == (10, 2))
+    def g(a=1, b=2): return (a, b)
+    verify(g.func_defaults == (1, 2))
+    del g.func_defaults
+    verify(g.func_defaults is None)
+    try:
+        g()
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't be allowed to call g() w/o defaults"
+
+def test_func_dict():
+    def f(): pass
+    a = f.__dict__
+    b = f.func_dict
+    verify(a == {})
+    verify(a is b)
+    f.hello = 'world'
+    verify(a == {'hello': 'world'})
+    verify(f.func_dict is a is f.__dict__)
+    f.func_dict = {}
+    verify(not hasattr(f, "hello"))
+    f.__dict__ = {'world': 'hello'}
+    verify(f.world == "hello")
+    verify(f.__dict__ is f.func_dict == {'world': 'hello'})
+    cantset(f, "func_dict", None)
+    cantset(f, "__dict__", None)
+
+def test_im_class():
+    class C:
+        def foo(self): pass
+    verify(C.foo.im_class is C)
+    verify(C().foo.im_class is C)
+    cantset(C.foo, "im_class", C)
+    cantset(C().foo, "im_class", C)
+
+def test_im_func():
+    def foo(self): pass
+    class C:
+        pass
+    C.foo = foo
+    verify(C.foo.im_func is foo)
+    verify(C().foo.im_func is foo)
+    cantset(C.foo, "im_func", foo)
+    cantset(C().foo, "im_func", foo)
+
+def test_im_self():
+    class C:
+        def foo(self): pass
+    verify(C.foo.im_self is None)
+    c = C()
+    verify(c.foo.im_self is c)
+    cantset(C.foo, "im_self", None)
+    cantset(c.foo, "im_self", c)
+
+def test_im_dict():
+    class C:
+        def foo(self): pass
+        foo.bar = 42
+    verify(C.foo.__dict__ == {'bar': 42})
+    verify(C().foo.__dict__ == {'bar': 42})
+    cantset(C.foo, "__dict__", C.foo.__dict__)
+    cantset(C().foo, "__dict__", C.foo.__dict__)
+
+def test_im_doc():
+    class C:
+        def foo(self): "hello"
+    verify(C.foo.__doc__ == "hello")
+    verify(C().foo.__doc__ == "hello")
+    cantset(C.foo, "__doc__", "hello")
+    cantset(C().foo, "__doc__", "hello")
+
+def test_im_name():
+    class C:
+        def foo(self): pass
+    verify(C.foo.__name__ == "foo")
+    verify(C().foo.__name__ == "foo")
+    cantset(C.foo, "__name__", "foo")
+    cantset(C().foo, "__name__", "foo")
+
+def testmore():
+    test_func_closure()
+    test_func_doc()
+    test_func_globals()
+    test_func_name()
+    test_func_code()
+    test_func_defaults()
+    test_func_dict()
+    # Tests for instance method attributes
+    test_im_class()
+    test_im_func()
+    test_im_self()
+    test_im_dict()
+    test_im_doc()
+    test_im_name()
+
+testmore()

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_generators.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_generators.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,1839 @@
+tutorial_tests = """
+Let's try a simple generator:
+
+    >>> def f():
+    ...    yield 1
+    ...    yield 2
+
+    >>> for i in f():
+    ...     print i
+    1
+    2
+    >>> g = f()
+    >>> g.next()
+    1
+    >>> g.next()
+    2
+
+"Falling off the end" stops the generator:
+
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+      File "<stdin>", line 2, in g
+    StopIteration
+
+"return" also stops the generator:
+
+    >>> def f():
+    ...     yield 1
+    ...     return
+    ...     yield 2 # never reached
+    ...
+    >>> g = f()
+    >>> g.next()
+    1
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+      File "<stdin>", line 3, in f
+    StopIteration
+    >>> g.next() # once stopped, can't be resumed
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    StopIteration
+
+"raise StopIteration" stops the generator too:
+
+    >>> def f():
+    ...     yield 1
+    ...     raise StopIteration
+    ...     yield 2 # never reached
+    ...
+    >>> g = f()
+    >>> g.next()
+    1
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    StopIteration
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    StopIteration
+
+However, they are not exactly equivalent:
+
+    >>> def g1():
+    ...     try:
+    ...         return
+    ...     except:
+    ...         yield 1
+    ...
+    >>> list(g1())
+    []
+
+    >>> def g2():
+    ...     try:
+    ...         raise StopIteration
+    ...     except:
+    ...         yield 42
+    >>> print list(g2())
+    [42]
+
+This may be surprising at first:
+
+    >>> def g3():
+    ...     try:
+    ...         return
+    ...     finally:
+    ...         yield 1
+    ...
+    >>> list(g3())
+    [1]
+
+Let's create an alternate range() function implemented as a generator:
+
+    >>> def yrange(n):
+    ...     for i in range(n):
+    ...         yield i
+    ...
+    >>> list(yrange(5))
+    [0, 1, 2, 3, 4]
+
+Generators always return to the most recent caller:
+
+    >>> def creator():
+    ...     r = yrange(5)
+    ...     print "creator", r.next()
+    ...     return r
+    ...
+    >>> def caller():
+    ...     r = creator()
+    ...     for i in r:
+    ...             print "caller", i
+    ...
+    >>> caller()
+    creator 0
+    caller 1
+    caller 2
+    caller 3
+    caller 4
+
+Generators can call other generators:
+
+    >>> def zrange(n):
+    ...     for i in yrange(n):
+    ...         yield i
+    ...
+    >>> list(zrange(5))
+    [0, 1, 2, 3, 4]
+
+"""
+
+# The examples from PEP 255.
+
+pep_tests = """
+
+Specification:  Yield
+
+    Restriction:  A generator cannot be resumed while it is actively
+    running:
+
+    >>> def g():
+    ...     i = me.next()
+    ...     yield i
+    >>> me = g()
+    >>> me.next()
+    Traceback (most recent call last):
+     ...
+      File "<string>", line 2, in g
+    ValueError: generator already executing
+
+Specification: Return
+
+    Note that return isn't always equivalent to raising StopIteration:  the
+    difference lies in how enclosing try/except constructs are treated.
+    For example,
+
+        >>> def f1():
+        ...     try:
+        ...         return
+        ...     except:
+        ...        yield 1
+        >>> print list(f1())
+        []
+
+    because, as in any function, return simply exits, but
+
+        >>> def f2():
+        ...     try:
+        ...         raise StopIteration
+        ...     except:
+        ...         yield 42
+        >>> print list(f2())
+        [42]
+
+    because StopIteration is captured by a bare "except", as is any
+    exception.
+
+Specification: Generators and Exception Propagation
+
+    >>> def f():
+    ...     return 1//0
+    >>> def g():
+    ...     yield f()  # the zero division exception propagates
+    ...     yield 42   # and we'll never get here
+    >>> k = g()
+    >>> k.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+      File "<stdin>", line 2, in g
+      File "<stdin>", line 2, in f
+    ZeroDivisionError: integer division by zero
+    >>> k.next()  # and the generator cannot be resumed
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    StopIteration
+    >>>
+
+Specification: Try/Except/Finally
+
+    >>> def f():
+    ...     try:
+    ...         yield 1
+    ...         try:
+    ...             yield 2
+    ...             1//0
+    ...             yield 3  # never get here
+    ...         except ZeroDivisionError:
+    ...             yield 4
+    ...             yield 5
+    ...             raise
+    ...         except:
+    ...             yield 6
+    ...         yield 7     # the "raise" above stops this
+    ...     except:
+    ...         yield 8
+    ...     yield 9
+    ...     try:
+    ...         x = 12
+    ...     finally:
+    ...         yield 10
+    ...     yield 11
+    >>> print list(f())
+    [1, 2, 4, 5, 8, 9, 10, 11]
+    >>>
+
+Guido's binary tree example.
+
+    >>> # A binary tree class.
+    >>> class Tree:
+    ...
+    ...     def __init__(self, label, left=None, right=None):
+    ...         self.label = label
+    ...         self.left = left
+    ...         self.right = right
+    ...
+    ...     def __repr__(self, level=0, indent="    "):
+    ...         s = level*indent + repr(self.label)
+    ...         if self.left:
+    ...             s = s + "\\n" + self.left.__repr__(level+1, indent)
+    ...         if self.right:
+    ...             s = s + "\\n" + self.right.__repr__(level+1, indent)
+    ...         return s
+    ...
+    ...     def __iter__(self):
+    ...         return inorder(self)
+
+    >>> # Create a Tree from a list.
+    >>> def tree(list):
+    ...     n = len(list)
+    ...     if n == 0:
+    ...         return []
+    ...     i = n // 2
+    ...     return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
+
+    >>> # Show it off: create a tree.
+    >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+    >>> # A recursive generator that generates Tree labels in in-order.
+    >>> def inorder(t):
+    ...     if t:
+    ...         for x in inorder(t.left):
+    ...             yield x
+    ...         yield t.label
+    ...         for x in inorder(t.right):
+    ...             yield x
+
+    >>> # Show it off: create a tree.
+    >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+    >>> # Print the nodes of the tree in in-order.
+    >>> for x in t:
+    ...     print x,
+    A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
+
+    >>> # A non-recursive generator.
+    >>> def inorder(node):
+    ...     stack = []
+    ...     while node:
+    ...         while node.left:
+    ...             stack.append(node)
+    ...             node = node.left
+    ...         yield node.label
+    ...         while not node.right:
+    ...             try:
+    ...                 node = stack.pop()
+    ...             except IndexError:
+    ...                 return
+    ...             yield node.label
+    ...         node = node.right
+
+    >>> # Exercise the non-recursive generator.
+    >>> for x in t:
+    ...     print x,
+    A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
+
+"""
+
+# Examples from Iterator-List and Python-Dev and c.l.py.
+
+email_tests = """
+
+The difference between yielding None and returning it.
+
+>>> def g():
+...     for i in range(3):
+...         yield None
+...     yield None
+...     return
+>>> list(g())
+[None, None, None, None]
+
+Ensure that explicitly raising StopIteration acts like any other exception
+in try/except, not like a return.
+
+>>> def g():
+...     yield 1
+...     try:
+...         raise StopIteration
+...     except:
+...         yield 2
+...     yield 3
+>>> list(g())
+[1, 2, 3]
+
+Next one was posted to c.l.py.
+
+>>> def gcomb(x, k):
+...     "Generate all combinations of k elements from list x."
+...
+...     if k > len(x):
+...         return
+...     if k == 0:
+...         yield []
+...     else:
+...         first, rest = x[0], x[1:]
+...         # A combination does or doesn't contain first.
+...         # If it does, the remainder is a k-1 comb of rest.
+...         for c in gcomb(rest, k-1):
+...             c.insert(0, first)
+...             yield c
+...         # If it doesn't contain first, it's a k comb of rest.
+...         for c in gcomb(rest, k):
+...             yield c
+
+>>> seq = range(1, 5)
+>>> for k in range(len(seq) + 2):
+...     print "%d-combs of %s:" % (k, seq)
+...     for c in gcomb(seq, k):
+...         print "   ", c
+0-combs of [1, 2, 3, 4]:
+    []
+1-combs of [1, 2, 3, 4]:
+    [1]
+    [2]
+    [3]
+    [4]
+2-combs of [1, 2, 3, 4]:
+    [1, 2]
+    [1, 3]
+    [1, 4]
+    [2, 3]
+    [2, 4]
+    [3, 4]
+3-combs of [1, 2, 3, 4]:
+    [1, 2, 3]
+    [1, 2, 4]
+    [1, 3, 4]
+    [2, 3, 4]
+4-combs of [1, 2, 3, 4]:
+    [1, 2, 3, 4]
+5-combs of [1, 2, 3, 4]:
+
+From the Iterators list, about the types of these things.
+
+>>> def g():
+...     yield 1
+...
+>>> type(g)
+<type 'function'>
+>>> i = g()
+>>> type(i)
+<type 'generator'>
+>>> [s for s in dir(i) if not s.startswith('_')]
+['close', 'gi_frame', 'gi_running', 'next', 'send', 'throw']
+>>> print i.next.__doc__
+x.next() -> the next value, or raise StopIteration
+>>> iter(i) is i
+True
+>>> import types
+>>> isinstance(i, types.GeneratorType)
+True
+
+And more, added later.
+
+>>> i.gi_running
+0
+>>> type(i.gi_frame)
+<type 'frame'>
+>>> i.gi_running = 42
+Traceback (most recent call last):
+  ...
+TypeError: readonly attribute
+>>> def g():
+...     yield me.gi_running
+>>> me = g()
+>>> me.gi_running
+0
+>>> me.next()
+1
+>>> me.gi_running
+0
+
+A clever union-find implementation from c.l.py, due to David Eppstein.
+Sent: Friday, June 29, 2001 12:16 PM
+To: python-list at python.org
+Subject: Re: PEP 255: Simple Generators
+
+>>> class disjointSet:
+...     def __init__(self, name):
+...         self.name = name
+...         self.parent = None
+...         self.generator = self.generate()
+...
+...     def generate(self):
+...         while not self.parent:
+...             yield self
+...         for x in self.parent.generator:
+...             yield x
+...
+...     def find(self):
+...         return self.generator.next()
+...
+...     def union(self, parent):
+...         if self.parent:
+...             raise ValueError("Sorry, I'm not a root!")
+...         self.parent = parent
+...
+...     def __str__(self):
+...         return self.name
+
+>>> names = "ABCDEFGHIJKLM"
+>>> sets = [disjointSet(name) for name in names]
+>>> roots = sets[:]
+
+>>> import random
+>>> gen = random.WichmannHill(42)
+>>> while 1:
+...     for s in sets:
+...         print "%s->%s" % (s, s.find()),
+...     print
+...     if len(roots) > 1:
+...         s1 = gen.choice(roots)
+...         roots.remove(s1)
+...         s2 = gen.choice(roots)
+...         s1.union(s2)
+...         print "merged", s1, "into", s2
+...     else:
+...         break
+A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
+merged D into G
+A->A B->B C->C D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
+merged C into F
+A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
+merged L into A
+A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->A M->M
+merged H into E
+A->A B->B C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
+merged B into E
+A->A B->E C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
+merged J into G
+A->A B->E C->F D->G E->E F->F G->G H->E I->I J->G K->K L->A M->M
+merged E into G
+A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->M
+merged M into G
+A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->G
+merged I into K
+A->A B->G C->F D->G E->G F->F G->G H->G I->K J->G K->K L->A M->G
+merged K into A
+A->A B->G C->F D->G E->G F->F G->G H->G I->A J->G K->A L->A M->G
+merged F into A
+A->A B->G C->A D->G E->G F->A G->G H->G I->A J->G K->A L->A M->G
+merged A into G
+A->G B->G C->G D->G E->G F->G G->G H->G I->G J->G K->G L->G M->G
+
+"""
+# Emacs turd '
+
+# Fun tests (for sufficiently warped notions of "fun").
+
+fun_tests = """
+
+Build up to a recursive Sieve of Eratosthenes generator.
+
+>>> def firstn(g, n):
+...     return [g.next() for i in range(n)]
+
+>>> def intsfrom(i):
+...     while 1:
+...         yield i
+...         i += 1
+
+>>> firstn(intsfrom(5), 7)
+[5, 6, 7, 8, 9, 10, 11]
+
+>>> def exclude_multiples(n, ints):
+...     for i in ints:
+...         if i % n:
+...             yield i
+
+>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
+[1, 2, 4, 5, 7, 8]
+
+>>> def sieve(ints):
+...     prime = ints.next()
+...     yield prime
+...     not_divisible_by_prime = exclude_multiples(prime, ints)
+...     for p in sieve(not_divisible_by_prime):
+...         yield p
+
+>>> primes = sieve(intsfrom(2))
+>>> firstn(primes, 20)
+[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
+
+
+Another famous problem:  generate all integers of the form
+    2**i * 3**j  * 5**k
+in increasing order, where i,j,k >= 0.  Trickier than it may look at first!
+Try writing it without generators, and correctly, and without generating
+3 internal results for each result output.
+
+>>> def times(n, g):
+...     for i in g:
+...         yield n * i
+>>> firstn(times(10, intsfrom(1)), 10)
+[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
+
+>>> def merge(g, h):
+...     ng = g.next()
+...     nh = h.next()
+...     while 1:
+...         if ng < nh:
+...             yield ng
+...             ng = g.next()
+...         elif ng > nh:
+...             yield nh
+...             nh = h.next()
+...         else:
+...             yield ng
+...             ng = g.next()
+...             nh = h.next()
+
+The following works, but is doing a whale of a lot of redundant work --
+it's not clear how to get the internal uses of m235 to share a single
+generator.  Note that me_times2 (etc) each need to see every element in the
+result sequence.  So this is an example where lazy lists are more natural
+(you can look at the head of a lazy list any number of times).
+
+>>> def m235():
+...     yield 1
+...     me_times2 = times(2, m235())
+...     me_times3 = times(3, m235())
+...     me_times5 = times(5, m235())
+...     for i in merge(merge(me_times2,
+...                          me_times3),
+...                    me_times5):
+...         yield i
+
+Don't print "too many" of these -- the implementation above is extremely
+inefficient:  each call of m235() leads to 3 recursive calls, and in
+turn each of those 3 more, and so on, and so on, until we've descended
+enough levels to satisfy the print stmts.  Very odd:  when I printed 5
+lines of results below, this managed to screw up Win98's malloc in "the
+usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
+address space, and it *looked* like a very slow leak.
+
+>>> result = m235()
+>>> for i in range(3):
+...     print firstn(result, 15)
+[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
+[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
+[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
+
+Heh.  Here's one way to get a shared list, complete with an excruciating
+namespace renaming trick.  The *pretty* part is that the times() and merge()
+functions can be reused as-is, because they only assume their stream
+arguments are iterable -- a LazyList is the same as a generator to times().
+
+>>> class LazyList:
+...     def __init__(self, g):
+...         self.sofar = []
+...         self.fetch = g.next
+...
+...     def __getitem__(self, i):
+...         sofar, fetch = self.sofar, self.fetch
+...         while i >= len(sofar):
+...             sofar.append(fetch())
+...         return sofar[i]
+
+>>> def m235():
+...     yield 1
+...     # Gack:  m235 below actually refers to a LazyList.
+...     me_times2 = times(2, m235)
+...     me_times3 = times(3, m235)
+...     me_times5 = times(5, m235)
+...     for i in merge(merge(me_times2,
+...                          me_times3),
+...                    me_times5):
+...         yield i
+
+Print as many of these as you like -- *this* implementation is memory-
+efficient.
+
+>>> m235 = LazyList(m235())
+>>> for i in range(5):
+...     print [m235[j] for j in range(15*i, 15*(i+1))]
+[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
+[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
+[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
+[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
+[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
+
+Ye olde Fibonacci generator, LazyList style.
+
+>>> def fibgen(a, b):
+...
+...     def sum(g, h):
+...         while 1:
+...             yield g.next() + h.next()
+...
+...     def tail(g):
+...         g.next()    # throw first away
+...         for x in g:
+...             yield x
+...
+...     yield a
+...     yield b
+...     for s in sum(iter(fib),
+...                  tail(iter(fib))):
+...         yield s
+
+>>> fib = LazyList(fibgen(1, 2))
+>>> firstn(iter(fib), 17)
+[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
+
+
+Running after your tail with itertools.tee (new in version 2.4)
+
+The algorithms "m235" (Hamming) and Fibonacci presented above are both
+examples of a whole family of FP (functional programming) algorithms
+where a function produces and returns a list while the production algorithm
+assumes the list has already been produced by recursively calling itself.
+For these algorithms to work, they must:
+
+- produce at least a first element without presupposing the existence of
+  the rest of the list
+- produce their elements in a lazy manner
+
+To work efficiently, the beginning of the list must not be recomputed over
+and over again. This is ensured in most FP languages as a built-in feature.
+In python, we have to explicitly maintain a list of already computed results
+and abandon genuine recursivity.
+
+This is what had been attempted above with the LazyList class. One problem
+with that class is that it keeps a list of all of the generated results and
+therefore continually grows. This partially defeats the goal of the generator
+concept, viz. produce the results only as needed instead of producing them
+all and thereby wasting memory.
+
+Thanks to itertools.tee, it is now clear "how to get the internal uses of
+m235 to share a single generator".
+
+>>> from itertools import tee
+>>> def m235():
+...     def _m235():
+...         yield 1
+...         for n in merge(times(2, m2),
+...                        merge(times(3, m3),
+...                              times(5, m5))):
+...             yield n
+...     m1 = _m235()
+...     m2, m3, m5, mRes = tee(m1, 4)
+...     return mRes
+
+>>> it = m235()
+>>> for i in range(5):
+...     print firstn(it, 15)
+[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
+[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
+[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
+[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
+[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
+
+The "tee" function does just what we want. It internally keeps a generated
+result for as long as it has not been "consumed" from all of the duplicated
+iterators, whereupon it is deleted. You can therefore print the hamming
+sequence during hours without increasing memory usage, or very little.
+
+The beauty of it is that recursive running-after-their-tail FP algorithms
+are quite straightforwardly expressed with this Python idiom.
+
+Ye olde Fibonacci generator, tee style.
+
+>>> def fib():
+...
+...     def _isum(g, h):
+...         while 1:
+...             yield g.next() + h.next()
+...
+...     def _fib():
+...         yield 1
+...         yield 2
+...         fibTail.next() # throw first away
+...         for res in _isum(fibHead, fibTail):
+...             yield res
+...
+...     realfib = _fib()
+...     fibHead, fibTail, fibRes = tee(realfib, 3)
+...     return fibRes
+
+>>> firstn(fib(), 17)
+[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
+
+"""
+
+# syntax_tests mostly provokes SyntaxErrors.  Also fiddling with #if 0
+# hackery.
+
+syntax_tests = """
+
+>>> def f():
+...     return 22
+...     yield 1
+Traceback (most recent call last):
+  ..
+SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[0]>, line 3)
+
+>>> def f():
+...     yield 1
+...     return 22
+Traceback (most recent call last):
+  ..
+SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[1]>, line 3)
+
+"return None" is not the same as "return" in a generator:
+
+>>> def f():
+...     yield 1
+...     return None
+Traceback (most recent call last):
+  ..
+SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[2]>, line 3)
+
+These are fine:
+
+>>> def f():
+...     yield 1
+...     return
+
+>>> def f():
+...     try:
+...         yield 1
+...     finally:
+...         pass
+
+>>> def f():
+...     try:
+...         try:
+...             1//0
+...         except ZeroDivisionError:
+...             yield 666
+...         except:
+...             pass
+...     finally:
+...         pass
+
+>>> def f():
+...     try:
+...         try:
+...             yield 12
+...             1//0
+...         except ZeroDivisionError:
+...             yield 666
+...         except:
+...             try:
+...                 x = 12
+...             finally:
+...                 yield 12
+...     except:
+...         return
+>>> list(f())
+[12, 666]
+
+>>> def f():
+...    yield
+>>> type(f())
+<type 'generator'>
+
+
+>>> def f():
+...    if 0:
+...        yield
+>>> type(f())
+<type 'generator'>
+
+
+>>> def f():
+...     if 0:
+...         yield 1
+>>> type(f())
+<type 'generator'>
+
+>>> def f():
+...    if "":
+...        yield None
+>>> type(f())
+<type 'generator'>
+
+>>> def f():
+...     return
+...     try:
+...         if x==4:
+...             pass
+...         elif 0:
+...             try:
+...                 1//0
+...             except SyntaxError:
+...                 pass
+...             else:
+...                 if 0:
+...                     while 12:
+...                         x += 1
+...                         yield 2 # don't blink
+...                         f(a, b, c, d, e)
+...         else:
+...             pass
+...     except:
+...         x = 1
+...     return
+>>> type(f())
+<type 'generator'>
+
+>>> def f():
+...     if 0:
+...         def g():
+...             yield 1
+...
+>>> type(f())
+<type 'NoneType'>
+
+>>> def f():
+...     if 0:
+...         class C:
+...             def __init__(self):
+...                 yield 1
+...             def f(self):
+...                 yield 2
+>>> type(f())
+<type 'NoneType'>
+
+>>> def f():
+...     if 0:
+...         return
+...     if 0:
+...         yield 2
+>>> type(f())
+<type 'generator'>
+
+
+>>> def f():
+...     if 0:
+...         lambda x:  x        # shouldn't trigger here
+...         return              # or here
+...         def f(i):
+...             return 2*i      # or here
+...         if 0:
+...             return 3        # but *this* sucks (line 8)
+...     if 0:
+...         yield 2             # because it's a generator (line 10)
+Traceback (most recent call last):
+SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[24]>, line 10)
+
+This one caused a crash (see SF bug 567538):
+
+>>> def f():
+...     for i in range(3):
+...         try:
+...             continue
+...         finally:
+...             yield i
+...
+>>> g = f()
+>>> print g.next()
+0
+>>> print g.next()
+1
+>>> print g.next()
+2
+>>> print g.next()
+Traceback (most recent call last):
+StopIteration
+"""
+
+# conjoin is a simple backtracking generator, named in honor of Icon's
+# "conjunction" control structure.  Pass a list of no-argument functions
+# that return iterable objects.  Easiest to explain by example:  assume the
+# function list [x, y, z] is passed.  Then conjoin acts like:
+#
+# def g():
+#     values = [None] * 3
+#     for values[0] in x():
+#         for values[1] in y():
+#             for values[2] in z():
+#                 yield values
+#
+# So some 3-lists of values *may* be generated, each time we successfully
+# get into the innermost loop.  If an iterator fails (is exhausted) before
+# then, it "backtracks" to get the next value from the nearest enclosing
+# iterator (the one "to the left"), and starts all over again at the next
+# slot (pumps a fresh iterator).  Of course this is most useful when the
+# iterators have side-effects, so that which values *can* be generated at
+# each slot depend on the values iterated at previous slots.
+
+def conjoin(gs):
+    # Backtracking cross-product generator (simple recursive form).
+    # gs is a list of no-argument callables, each returning an iterable;
+    # yields the shared list `values` with one entry per slot for every
+    # combination reached.  NOTE: the *same* list object is yielded each
+    # time, so callers must copy it to keep a result.
+
+    values = [None] * len(gs)
+
+    def gen(i, values=values):
+        # Fill slot i from its iterator, recursing for the remaining slots.
+        if i >= len(gs):
+            yield values
+        else:
+            for values[i] in gs[i]():
+                for x in gen(i+1):
+                    yield x
+
+    for x in gen(0):
+        yield x
+
+# That works fine, but recursing a level and checking i against len(gs) for
+# each item produced is inefficient.  By doing manual loop unrolling across
+# generator boundaries, it's possible to eliminate most of that overhead.
+# This isn't worth the bother *in general* for generators, but conjoin() is
+# a core building block for some CPU-intensive generator applications.
+
+def conjoin(gs):
+    # Same contract as the simple conjoin above, but with manual loop
+    # unrolling across generator boundaries: slots are consumed three at a
+    # time to cut the per-item recursion/len-check overhead.  Yields the
+    # shared `values` list; copy it to keep a result.
+
+    n = len(gs)
+    values = [None] * n
+
+    # Do one loop nest at time recursively, until the # of loop nests
+    # remaining is divisible by 3.
+
+    def gen(i, values=values):
+        if i >= n:
+            yield values
+
+        elif (n-i) % 3:
+            # Peel off a single slot until the remainder is a multiple of 3.
+            ip1 = i+1
+            for values[i] in gs[i]():
+                for x in gen(ip1):
+                    yield x
+
+        else:
+            for x in _gen3(i):
+                yield x
+
+    # Do three loop nests at a time, recursing only if at least three more
+    # remain.  Don't call directly:  this is an internal optimization for
+    # gen's use.
+
+    def _gen3(i, values=values):
+        assert i < n and (n-i) % 3 == 0
+        ip1, ip2, ip3 = i+1, i+2, i+3
+        g, g1, g2 = gs[i : ip3]
+
+        if ip3 >= n:
+            # These are the last three, so we can yield values directly.
+            for values[i] in g():
+                for values[ip1] in g1():
+                    for values[ip2] in g2():
+                        yield values
+
+        else:
+            # At least 6 loop nests remain; peel off 3 and recurse for the
+            # rest.
+            for values[i] in g():
+                for values[ip1] in g1():
+                    for values[ip2] in g2():
+                        for x in _gen3(ip3):
+                            yield x
+
+    for x in gen(0):
+        yield x
+
+# And one more approach:  For backtracking apps like the Knight's Tour
+# solver below, the number of backtracking levels can be enormous (one
+# level per square, for the Knight's Tour, so that e.g. a 100x100 board
+# needs 10,000 levels).  In such cases Python is likely to run out of
+# stack space due to recursion.  So here's a recursion-free version of
+# conjoin too.
+# NOTE WELL:  This allows large problems to be solved with only trivial
+# demands on stack space.  Without explicitly resumable generators, this is
+# much harder to achieve.  OTOH, this is much slower (up to a factor of 2)
+# than the fancy unrolled recursive conjoin.
+
+def flat_conjoin(gs):  # rename to conjoin to run tests with this instead
+    # Iterative (recursion-free) conjoin: keeps an explicit stack of live
+    # iterators in `iters` instead of recursing one level per slot, so very
+    # deep backtracking problems don't exhaust Python's call stack.
+    n = len(gs)
+    values = [None] * n
+    iters  = [None] * n
+    _StopIteration = StopIteration  # make local because caught a *lot*
+    i = 0
+    while 1:
+        # Descend.
+        try:
+            while i < n:
+                # Pump a fresh iterator for slot i; bind its .next method
+                # directly to avoid an attribute lookup per value.
+                it = iters[i] = gs[i]().next
+                values[i] = it()
+                i += 1
+        except _StopIteration:
+            pass
+        else:
+            # Reached the innermost slot with every iterator productive:
+            # a complete combination (the shared list -- copy to keep it).
+            assert i == n
+            yield values
+
+        # Backtrack until an older iterator can be resumed.
+        i -= 1
+        while i >= 0:
+            try:
+                values[i] = iters[i]()
+                # Success!  Start fresh at next level.
+                i += 1
+                break
+            except _StopIteration:
+                # Continue backtracking.
+                i -= 1
+        else:
+            assert i < 0
+            break
+
+# A conjoin-based N-Queens solver.
+
+class Queens:
+    # Conjoin-based N-Queens solver: one generator per row yields the legal
+    # columns for that row given the columns/diagonals already in use, and
+    # conjoin() backtracks across the rows.
+    def __init__(self, n):
+        self.n = n
+        rangen = range(n)
+
+        # Assign a unique int to each column and diagonal.
+        # columns:  n of those, range(n).
+        # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
+        # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
+        # based.
+        # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
+        # each, smallest i+j is 0, largest is 2n-2.
+
+        # For each square, compute a bit vector of the columns and
+        # diagonals it covers, and for each row compute a function that
+        # generates the possibilities for the columns in that row.
+        self.rowgenerators = []
+        for i in rangen:
+            rowuses = [(1L << j) |                  # column ordinal
+                       (1L << (n + i-j + n-1)) |    # NW-SE ordinal
+                       (1L << (n + 2*n-1 + i+j))    # NE-SW ordinal
+                            for j in rangen]
+
+            def rowgen(rowuses=rowuses):
+                # Yield each column j of this row not conflicting with
+                # self.used; mark it used while the nested rows explore it,
+                # then unmark on backtrack (after the yield resumes).
+                for j in rangen:
+                    uses = rowuses[j]
+                    if uses & self.used == 0:
+                        self.used |= uses
+                        yield j
+                        self.used &= ~uses
+
+            self.rowgenerators.append(rowgen)
+
+    # Generate solutions.
+    def solve(self):
+        # Yields row2col lists mapping row index -> queen's column.
+        self.used = 0
+        for row2col in conjoin(self.rowgenerators):
+            yield row2col
+
+    def printsolution(self, row2col):
+        # Pretty-print one solution as an ASCII chessboard with Q markers.
+        n = self.n
+        assert n == len(row2col)
+        sep = "+" + "-+" * n
+        print sep
+        for i in range(n):
+            squares = [" " for j in range(n)]
+            squares[row2col[i]] = "Q"
+            print "|" + "|".join(squares) + "|"
+            print sep
+
+# A conjoin-based Knight's Tour solver.  This is pretty sophisticated
+# (e.g., when used with flat_conjoin above, and passing hard=1 to the
+# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
+# creating 10s of thousands of generators then!), and is lengthy.
+
+class Knights:
+    def __init__(self, m, n, hard=0):
+        self.m, self.n = m, n
+
+        # solve() will set up succs[i] to be a list of square #i's
+        # successors.
+        succs = self.succs = []
+
+        # Remove i0 from each of its successor's successor lists, i.e.
+        # successors can't go back to i0 again.  Return 0 if we can
+        # detect this makes a solution impossible, else return 1.
+
+        def remove_from_successors(i0, len=len):
+            # If we remove all exits from a free square, we're dead:
+            # even if we move to it next, we can't leave it again.
+            # If we create a square with one exit, we must visit it next;
+            # else somebody else will have to visit it, and since there's
+            # only one adjacent, there won't be a way to leave it again.
+            # Finally, if we create more than one free square with a
+            # single exit, we can only move to one of them next, leaving
+            # the other one a dead end.
+            ne0 = ne1 = 0
+            for i in succs[i0]:
+                s = succs[i]
+                s.remove(i0)
+                e = len(s)
+                if e == 0:
+                    ne0 += 1
+                elif e == 1:
+                    ne1 += 1
+            return ne0 == 0 and ne1 < 2
+
+        # Put i0 back in each of its successor's successor lists.
+
+        def add_to_successors(i0):
+            for i in succs[i0]:
+                succs[i].append(i0)
+
+        # Generate the first move.
+        def first():
+            if m < 1 or n < 1:
+                return
+
+            # Since we're looking for a cycle, it doesn't matter where we
+            # start.  Starting in a corner makes the 2nd move easy.
+            corner = self.coords2index(0, 0)
+            remove_from_successors(corner)
+            self.lastij = corner
+            yield corner
+            add_to_successors(corner)
+
+        # Generate the second moves.
+        def second():
+            corner = self.coords2index(0, 0)
+            assert self.lastij == corner  # i.e., we started in the corner
+            if m < 3 or n < 3:
+                return
+            assert len(succs[corner]) == 2
+            assert self.coords2index(1, 2) in succs[corner]
+            assert self.coords2index(2, 1) in succs[corner]
+            # Only two choices.  Whichever we pick, the other must be the
+            # square picked on move m*n, as it's the only way to get back
+            # to (0, 0).  Save its index in self.final so that moves before
+            # the last know it must be kept free.
+            for i, j in (1, 2), (2, 1):
+                this  = self.coords2index(i, j)
+                final = self.coords2index(3-i, 3-j)
+                self.final = final
+
+                remove_from_successors(this)
+                succs[final].append(corner)
+                self.lastij = this
+                yield this
+                succs[final].remove(corner)
+                add_to_successors(this)
+
+        # Generate moves 3 thru m*n-1.
+        def advance(len=len):
+            # If some successor has only one exit, must take it.
+            # Else favor successors with fewer exits.
+            candidates = []
+            for i in succs[self.lastij]:
+                e = len(succs[i])
+                assert e > 0, "else remove_from_successors() pruning flawed"
+                if e == 1:
+                    candidates = [(e, i)]
+                    break
+                candidates.append((e, i))
+            else:
+                candidates.sort()
+
+            for e, i in candidates:
+                if i != self.final:
+                    if remove_from_successors(i):
+                        self.lastij = i
+                        yield i
+                    add_to_successors(i)
+
+        # Generate moves 3 thru m*n-1.  Alternative version using a
+        # stronger (but more expensive) heuristic to order successors.
+        # Since the # of backtracking levels is m*n, a poor move early on
+        # can take eons to undo.  Smallest square board for which this
+        # matters a lot is 52x52.
+        def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
+            # If some successor has only one exit, must take it.
+            # Else favor successors with fewer exits.
+            # Break ties via max distance from board centerpoint (favor
+            # corners and edges whenever possible).
+            candidates = []
+            for i in succs[self.lastij]:
+                e = len(succs[i])
+                assert e > 0, "else remove_from_successors() pruning flawed"
+                if e == 1:
+                    candidates = [(e, 0, i)]
+                    break
+                i1, j1 = self.index2coords(i)
+                d = (i1 - vmid)**2 + (j1 - hmid)**2
+                candidates.append((e, -d, i))
+            else:
+                candidates.sort()
+
+            for e, d, i in candidates:
+                if i != self.final:
+                    if remove_from_successors(i):
+                        self.lastij = i
+                        yield i
+                    add_to_successors(i)
+
+        # Generate the last move.
+        def last():
+            assert self.final in succs[self.lastij]
+            yield self.final
+
+        if m*n < 4:
+            self.squaregenerators = [first]
+        else:
+            self.squaregenerators = [first, second] + \
+                [hard and advance_hard or advance] * (m*n - 3) + \
+                [last]
+
+    def coords2index(self, i, j):
+        assert 0 <= i < self.m
+        assert 0 <= j < self.n
+        return i * self.n + j
+
+    def index2coords(self, index):
+        assert 0 <= index < self.m * self.n
+        return divmod(index, self.n)
+
+    def _init_board(self):
+        succs = self.succs
+        del succs[:]
+        m, n = self.m, self.n
+        c2i = self.coords2index
+
+        offsets = [( 1,  2), ( 2,  1), ( 2, -1), ( 1, -2),
+                   (-1, -2), (-2, -1), (-2,  1), (-1,  2)]
+        rangen = range(n)
+        for i in range(m):
+            for j in rangen:
+                s = [c2i(i+io, j+jo) for io, jo in offsets
+                                     if 0 <= i+io < m and
+                                        0 <= j+jo < n]
+                succs.append(s)
+
+    # Generate solutions.
+    def solve(self):
+        self._init_board()
+        for x in conjoin(self.squaregenerators):
+            yield x
+
+    def printsolution(self, x):
+        m, n = self.m, self.n
+        assert len(x) == m*n
+        w = len(str(m*n))
+        format = "%" + str(w) + "d"
+
+        squares = [[None] * n for i in range(m)]
+        k = 1
+        for i in x:
+            i1, j1 = self.index2coords(i)
+            squares[i1][j1] = format % k
+            k += 1
+
+        sep = "+" + ("-" * w + "+") * n
+        print sep
+        for i in range(m):
+            row = squares[i]
+            print "|" + "|".join(row) + "|"
+            print sep
+
+conjoin_tests = """
+
+Generate the 3-bit binary numbers in order.  This illustrates dumbest-
+possible use of conjoin, just to generate the full cross-product.
+
+>>> for c in conjoin([lambda: iter((0, 1))] * 3):
+...     print c
+[0, 0, 0]
+[0, 0, 1]
+[0, 1, 0]
+[0, 1, 1]
+[1, 0, 0]
+[1, 0, 1]
+[1, 1, 0]
+[1, 1, 1]
+
+For efficiency in typical backtracking apps, conjoin() yields the same list
+object each time.  So if you want to save away a full account of its
+generated sequence, you need to copy its results.
+
+>>> def gencopy(iterator):
+...     for x in iterator:
+...         yield x[:]
+
+>>> for n in range(10):
+...     all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
+...     print n, len(all), all[0] == [0] * n, all[-1] == [1] * n
+0 1 True True
+1 2 True True
+2 4 True True
+3 8 True True
+4 16 True True
+5 32 True True
+6 64 True True
+7 128 True True
+8 256 True True
+9 512 True True
+
+And run an 8-queens solver.
+
+>>> q = Queens(8)
+>>> LIMIT = 2
+>>> count = 0
+>>> for row2col in q.solve():
+...     count += 1
+...     if count <= LIMIT:
+...         print "Solution", count
+...         q.printsolution(row2col)
+Solution 1
++-+-+-+-+-+-+-+-+
+|Q| | | | | | | |
++-+-+-+-+-+-+-+-+
+| | | | |Q| | | |
++-+-+-+-+-+-+-+-+
+| | | | | | | |Q|
++-+-+-+-+-+-+-+-+
+| | | | | |Q| | |
++-+-+-+-+-+-+-+-+
+| | |Q| | | | | |
++-+-+-+-+-+-+-+-+
+| | | | | | |Q| |
++-+-+-+-+-+-+-+-+
+| |Q| | | | | | |
++-+-+-+-+-+-+-+-+
+| | | |Q| | | | |
++-+-+-+-+-+-+-+-+
+Solution 2
++-+-+-+-+-+-+-+-+
+|Q| | | | | | | |
++-+-+-+-+-+-+-+-+
+| | | | | |Q| | |
++-+-+-+-+-+-+-+-+
+| | | | | | | |Q|
++-+-+-+-+-+-+-+-+
+| | |Q| | | | | |
++-+-+-+-+-+-+-+-+
+| | | | | | |Q| |
++-+-+-+-+-+-+-+-+
+| | | |Q| | | | |
++-+-+-+-+-+-+-+-+
+| |Q| | | | | | |
++-+-+-+-+-+-+-+-+
+| | | | |Q| | | |
++-+-+-+-+-+-+-+-+
+
+>>> print count, "solutions in all."
+92 solutions in all.
+
+And run a Knight's Tour on a 10x10 board.  Note that there are about
+20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
+
+>>> k = Knights(10, 10)
+>>> LIMIT = 2
+>>> count = 0
+>>> for x in k.solve():
+...     count += 1
+...     if count <= LIMIT:
+...         print "Solution", count
+...         k.printsolution(x)
+...     else:
+...         break
+Solution 1
++---+---+---+---+---+---+---+---+---+---+
+|  1| 58| 27| 34|  3| 40| 29| 10|  5|  8|
++---+---+---+---+---+---+---+---+---+---+
+| 26| 35|  2| 57| 28| 33|  4|  7| 30| 11|
++---+---+---+---+---+---+---+---+---+---+
+| 59|100| 73| 36| 41| 56| 39| 32|  9|  6|
++---+---+---+---+---+---+---+---+---+---+
+| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
++---+---+---+---+---+---+---+---+---+---+
+| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
++---+---+---+---+---+---+---+---+---+---+
+| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
++---+---+---+---+---+---+---+---+---+---+
+| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
++---+---+---+---+---+---+---+---+---+---+
+| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
++---+---+---+---+---+---+---+---+---+---+
+| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
++---+---+---+---+---+---+---+---+---+---+
+| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
++---+---+---+---+---+---+---+---+---+---+
+Solution 2
++---+---+---+---+---+---+---+---+---+---+
+|  1| 58| 27| 34|  3| 40| 29| 10|  5|  8|
++---+---+---+---+---+---+---+---+---+---+
+| 26| 35|  2| 57| 28| 33|  4|  7| 30| 11|
++---+---+---+---+---+---+---+---+---+---+
+| 59|100| 73| 36| 41| 56| 39| 32|  9|  6|
++---+---+---+---+---+---+---+---+---+---+
+| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
++---+---+---+---+---+---+---+---+---+---+
+| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
++---+---+---+---+---+---+---+---+---+---+
+| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
++---+---+---+---+---+---+---+---+---+---+
+| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
++---+---+---+---+---+---+---+---+---+---+
+| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
++---+---+---+---+---+---+---+---+---+---+
+| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
++---+---+---+---+---+---+---+---+---+---+
+| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
++---+---+---+---+---+---+---+---+---+---+
+"""
+
+weakref_tests = """\
+Generators are weakly referenceable:
+
+>>> import weakref
+>>> def gen():
+...     yield 'foo!'
+...
+>>> wr = weakref.ref(gen)
+>>> wr() is gen
+True
+>>> p = weakref.proxy(gen)
+
+Generator-iterators are weakly referencable as well:
+
+>>> gi = gen()
+>>> wr = weakref.ref(gi)
+>>> wr() is gi
+True
+>>> p = weakref.proxy(gi)
+>>> list(p)
+['foo!']
+
+"""
+
+coroutine_tests = """\
+Sending a value into a started generator:
+
+>>> def f():
+...     print (yield 1)
+...     yield 2
+>>> g = f()
+>>> g.next()
+1
+>>> g.send(42)
+42
+2
+
+Sending a value into a new generator produces a TypeError:
+
+>>> f().send("foo")
+Traceback (most recent call last):
+...
+TypeError: can't send non-None value to a just-started generator
+
+
+Yield by itself yields None:
+
+>>> def f(): yield
+>>> list(f())
+[None]
+
+
+
+An obscene abuse of a yield expression within a generator expression:
+
+>>> list((yield 21) for i in range(4))
+[21, None, 21, None, 21, None, 21, None]
+
+And a more sane, but still weird usage:
+
+>>> def f(): list(i for i in [(yield 26)])
+>>> type(f())
+<type 'generator'>
+
+
+A yield expression with augmented assignment.
+
+>>> def coroutine(seq):
+...     count = 0
+...     while count < 200:
+...         count += yield
+...         seq.append(count)
+>>> seq = []
+>>> c = coroutine(seq)
+>>> c.next()
+>>> print seq
+[]
+>>> c.send(10)
+>>> print seq
+[10]
+>>> c.send(10)
+>>> print seq
+[10, 20]
+>>> c.send(10)
+>>> print seq
+[10, 20, 30]
+
+
+Check some syntax errors for yield expressions:
+
+>>> f=lambda: (yield 1),(yield 2)
+Traceback (most recent call last):
+  ...
+SyntaxError: 'yield' outside function (<doctest test.test_generators.__test__.coroutine[21]>, line 1)
+
+>>> def f(): return lambda x=(yield): 1
+Traceback (most recent call last):
+  ...
+SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.coroutine[22]>, line 1)
+
+>>> def f(): x = yield = y
+Traceback (most recent call last):
+  ...
+SyntaxError: assignment to yield expression not possible (<doctest test.test_generators.__test__.coroutine[23]>, line 1)
+
+>>> def f(): (yield bar) = y
+Traceback (most recent call last):
+  ...
+SyntaxError: can't assign to yield expression (<doctest test.test_generators.__test__.coroutine[24]>, line 1)
+
+>>> def f(): (yield bar) += y
+Traceback (most recent call last):
+  ...
+SyntaxError: augmented assignment to yield expression not possible (<doctest test.test_generators.__test__.coroutine[25]>, line 1)
+
+
+Now check some throw() conditions:
+
+>>> def f():
+...     while True:
+...         try:
+...             print (yield)
+...         except ValueError,v:
+...             print "caught ValueError (%s)" % (v),
+>>> import sys
+>>> g = f()
+>>> g.next()
+
+>>> g.throw(ValueError) # type only
+caught ValueError ()
+
+>>> g.throw(ValueError("xyz"))  # value only
+caught ValueError (xyz)
+
+>>> g.throw(ValueError, ValueError(1))   # value+matching type
+caught ValueError (1)
+
+>>> g.throw(ValueError, TypeError(1))  # mismatched type, rewrapped
+caught ValueError (1)
+
+>>> g.throw(ValueError, ValueError(1), None)   # explicit None traceback
+caught ValueError (1)
+
+>>> g.throw(ValueError(1), "foo")       # bad args
+Traceback (most recent call last):
+  ...
+TypeError: instance exception may not have a separate value
+
+>>> g.throw(ValueError, "foo", 23)      # bad args
+Traceback (most recent call last):
+  ...
+TypeError: throw() third argument must be a traceback object
+
+>>> def throw(g,exc):
+...     try:
+...         raise exc
+...     except:
+...         g.throw(*sys.exc_info())
+>>> throw(g,ValueError) # do it with traceback included
+caught ValueError ()
+
+>>> g.send(1)
+1
+
+>>> throw(g,TypeError)  # terminate the generator
+Traceback (most recent call last):
+  ...
+TypeError
+
+>>> print g.gi_frame
+None
+
+>>> g.send(2)
+Traceback (most recent call last):
+  ...
+StopIteration
+
+>>> g.throw(ValueError,6)       # throw on closed generator
+Traceback (most recent call last):
+  ...
+ValueError: 6
+
+>>> f().throw(ValueError,7)     # throw on just-opened generator
+Traceback (most recent call last):
+  ...
+ValueError: 7
+
+>>> f().throw("abc")     # throw on just-opened generator
+Traceback (most recent call last):
+  ...
+abc
+
+Now let's try closing a generator:
+
+>>> def f():
+...     try: yield
+...     except GeneratorExit:
+...         print "exiting"
+
+>>> g = f()
+>>> g.next()
+>>> g.close()
+exiting
+>>> g.close()  # should be no-op now
+
+>>> f().close()  # close on just-opened generator should be fine
+
+>>> def f(): yield      # an even simpler generator
+>>> f().close()         # close before opening
+>>> g = f()
+>>> g.next()
+>>> g.close()           # close normally
+
+And finalization:
+
+>>> def f():
+...     try: yield
+...     finally:
+...         print "exiting"
+
+>>> g = f()
+>>> g.next()
+>>> del g
+exiting
+
+
+Now let's try some ill-behaved generators:
+
+>>> def f():
+...     try: yield
+...     except GeneratorExit:
+...         yield "foo!"
+>>> g = f()
+>>> g.next()
+>>> g.close()
+Traceback (most recent call last):
+  ...
+RuntimeError: generator ignored GeneratorExit
+>>> g.close()
+
+
+Our ill-behaved code should be invoked during GC:
+
+>>> import sys, StringIO
+>>> old, sys.stderr = sys.stderr, StringIO.StringIO()
+>>> g = f()
+>>> g.next()
+>>> del g
+>>> sys.stderr.getvalue().startswith(
+...     "Exception exceptions.RuntimeError: 'generator ignored GeneratorExit' in "
+... )
+True
+>>> sys.stderr = old
+
+
+And errors thrown during closing should propagate:
+
+>>> def f():
+...     try: yield
+...     except GeneratorExit:
+...         raise TypeError("fie!")
+>>> g = f()
+>>> g.next()
+>>> g.close()
+Traceback (most recent call last):
+  ...
+TypeError: fie!
+
+
+Ensure that various yield expression constructs make their
+enclosing function a generator:
+
+>>> def f(): x += yield
+>>> type(f())
+<type 'generator'>
+
+>>> def f(): x = yield
+>>> type(f())
+<type 'generator'>
+
+>>> def f(): lambda x=(yield): 1
+>>> type(f())
+<type 'generator'>
+
+>>> def f(): x=(i for i in (yield) if (yield))
+>>> type(f())
+<type 'generator'>
+
+>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
+>>> data = [1,2]
+>>> g = f(data)
+>>> type(g)
+<type 'generator'>
+>>> g.send(None)
+'a'
+>>> data
+[1, 2]
+>>> g.send(0)
+'b'
+>>> data
+[27, 2]
+>>> try: g.send(1)
+... except StopIteration: pass
+>>> data
+[27, 27]
+
+"""
+
+refleaks_tests = """
+Prior to adding cycle-GC support to itertools.tee, this code would leak
+references. We add it to the standard suite so the routine refleak-tests
+would trigger if it starts being uncleanable again.
+
+>>> import itertools
+>>> def leak():
+...     class gen:
+...         def __iter__(self):
+...             return self
+...         def next(self):
+...             return self.item
+...     g = gen()
+...     head, tail = itertools.tee(g)
+...     g.item = head
+...     return head
+>>> it = leak()
+
+Make sure to also test the involvement of the tee-internal teedataobject,
+which stores returned items.
+
+>>> item = it.next()
+
+
+
+This test leaked at one point due to generator finalization/destruction.
+It was copied from Lib/test/leakers/test_generator_cycle.py before the file
+was removed.
+
+>>> def leak():
+...    def gen():
+...        while True:
+...            yield g
+...    g = gen()
+
+>>> leak()
+
+
+
+This test isn't really generator related, but rather exception-in-cleanup
+related. The coroutine tests (above) just happen to cause an exception in
+the generator's __del__ (tp_del) method. We can also test for this
+explicitly, without generators. We do have to redirect stderr to avoid
+printing warnings and to doublecheck that we actually tested what we wanted
+to test.
+
+>>> import sys, StringIO
+>>> old = sys.stderr
+>>> try:
+...     sys.stderr = StringIO.StringIO()
+...     class Leaker:
+...         def __del__(self):
+...             raise RuntimeError
+...
+...     l = Leaker()
+...     del l
+...     err = sys.stderr.getvalue().strip()
+...     err.startswith(
+...         "Exception exceptions.RuntimeError: RuntimeError() in <"
+...     )
+...     err.endswith("> ignored")
+...     len(err.splitlines())
+... finally:
+...     sys.stderr = old
+True
+True
+1
+
+
+
+These refleak tests should perhaps be in a testfile of their own,
+test_generators just happened to be the test that drew these out.
+
+"""
+
+__test__ = {"tut":      tutorial_tests,
+            "pep":      pep_tests,
+            "email":    email_tests,
+            "fun":      fun_tests,
+            "syntax":   syntax_tests,
+            "conjoin":  conjoin_tests,
+            "weakref":  weakref_tests,
+            "coroutine":  coroutine_tests,
+            "refleaks": refleaks_tests,
+            }
+
+# Magic test name that regrtest.py invokes *after* importing this module.
+# This worms around a bootstrap problem.
+# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
+# so this works as expected in both ways of running regrtest.
+def test_main(verbose=None):
+    from test import test_support, test_generators
+    test_support.run_doctest(test_generators, verbose)
+
+# This part isn't needed for regrtest, but for running the test directly.
+if __name__ == "__main__":
+    test_main(1)

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_genexps.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_genexps.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,281 @@
+doctests = """
+
+Test simple loop with conditional
+
+    >>> sum(i*i for i in range(10) if i&1 == 1)
+    165
+
+Test simple nesting
+
+    >>> list((i,j) for i in range(3) for j in range(4) )
+    [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
+
+Test nesting with the inner expression dependent on the outer
+
+    >>> list((i,j) for i in range(4) for j in range(i) )
+    [(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
+
+Make sure the induction variable is not exposed
+
+    >>> i = 20
+    >>> sum(i*i for i in range(5))
+    30
+    >>> i
+    20
+
+Test first class
+
+    >>> g = (i*i for i in range(4))
+    >>> type(g)
+    <type 'generator'>
+    >>> list(g)
+    [0, 1, 4, 9]
+
+Test direct calls to next()
+
+    >>> g = (i*i for i in range(3))
+    >>> g.next()
+    0
+    >>> g.next()
+    1
+    >>> g.next()
+    4
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<pyshell#21>", line 1, in -toplevel-
+        g.next()
+    StopIteration
+
+Does it stay stopped?
+
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<pyshell#21>", line 1, in -toplevel-
+        g.next()
+    StopIteration
+    >>> list(g)
+    []
+
+Test running gen when defining function is out of scope
+
+    >>> def f(n):
+    ...     return (i*i for i in xrange(n))
+    >>> list(f(10))
+    [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
+
+    >>> def f(n):
+    ...     return ((i,j) for i in xrange(3) for j in xrange(n))
+    >>> list(f(4))
+    [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
+    >>> def f(n):
+    ...     return ((i,j) for i in xrange(3) for j in xrange(4) if j in xrange(n))
+    >>> list(f(4))
+    [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
+    >>> list(f(2))
+    [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
+
+Verify that parentheses are required in a statement
+
+    >>> def f(n):
+    ...     return i*i for i in xrange(n)
+    Traceback (most recent call last):
+       ...
+    SyntaxError: invalid syntax
+
+Verify that parentheses are required when used as a keyword argument value
+
+    >>> dict(a = i for i in xrange(10))
+    Traceback (most recent call last):
+       ...
+    SyntaxError: invalid syntax
+
+Verify that parentheses are allowed when used as a keyword argument value
+
+    >>> dict(a = (i for i in xrange(10))) #doctest: +ELLIPSIS
+    {'a': <generator object at ...>}
+
+Verify early binding for the outermost for-expression
+
+    >>> x=10
+    >>> g = (i*i for i in range(x))
+    >>> x = 5
+    >>> list(g)
+    [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
+
+Verify that the outermost for-expression makes an immediate check
+for iterability
+
+    >>> (i for i in 6)
+    Traceback (most recent call last):
+      File "<pyshell#4>", line 1, in -toplevel-
+        (i for i in 6)
+    TypeError: 'int' object is not iterable
+
+Verify late binding for the outermost if-expression
+
+    >>> include = (2,4,6,8)
+    >>> g = (i*i for i in range(10) if i in include)
+    >>> include = (1,3,5,7,9)
+    >>> list(g)
+    [1, 9, 25, 49, 81]
+
+Verify late binding for the innermost for-expression
+
+    >>> g = ((i,j) for i in range(3) for j in range(x))
+    >>> x = 4
+    >>> list(g)
+    [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
+
+Verify re-use of tuples (a side benefit of using genexps over listcomps)
+
+##    >>> tupleids = map(id, ((i,i) for i in xrange(10)))
+##    >>> int(max(tupleids) - min(tupleids))
+##    0
+
+Verify that syntax errors are raised for genexps used as lvalues
+
+    >>> (y for y in (1,2)) = 10
+    Traceback (most recent call last):
+       ...
+    SyntaxError: assign to generator expression not possible
+
+    >>> (y for y in (1,2)) += 10
+    Traceback (most recent call last):
+       ...
+    SyntaxError: augmented assign to tuple literal or generator expression not possible
+
+
+
+########### Tests borrowed from or inspired by test_generators.py ############
+
+Make a generator that acts like range()
+
+    >>> yrange = lambda n:  (i for i in xrange(n))
+    >>> list(yrange(10))
+    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+
+Generators always return to the most recent caller:
+
+    >>> def creator():
+    ...     r = yrange(5)
+    ...     print "creator", r.next()
+    ...     return r
+    >>> def caller():
+    ...     r = creator()
+    ...     for i in r:
+    ...             print "caller", i
+    >>> caller()
+    creator 0
+    caller 1
+    caller 2
+    caller 3
+    caller 4
+
+Generators can call other generators:
+
+    >>> def zrange(n):
+    ...     for i in yrange(n):
+    ...         yield i
+    >>> list(zrange(5))
+    [0, 1, 2, 3, 4]
+
+
+Verify that a gen exp cannot be resumed while it is actively running:
+
+    >>> g = (me.next() for i in xrange(10))
+    >>> me = g
+    >>> me.next()
+    Traceback (most recent call last):
+      File "<pyshell#30>", line 1, in -toplevel-
+        me.next()
+      File "<pyshell#28>", line 1, in <generator expression>
+        g = (me.next() for i in xrange(10))
+    ValueError: generator already executing
+
+Verify exception propagation
+
+    >>> g = (10 // i for i in (5, 0, 2))
+    >>> g.next()
+    2
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<pyshell#37>", line 1, in -toplevel-
+        g.next()
+      File "<pyshell#35>", line 1, in <generator expression>
+        g = (10 // i for i in (5, 0, 2))
+    ZeroDivisionError: integer division by zero
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<pyshell#38>", line 1, in -toplevel-
+        g.next()
+    StopIteration
+
+Make sure that None is a valid return value
+
+    >>> list(None for i in xrange(10))
+    [None, None, None, None, None, None, None, None, None, None]
+
+Check that generator attributes are present
+
+    >>> g = (i*i for i in range(3))
+    >>> expected = set(['gi_frame', 'gi_running', 'next'])
+    >>> set(attr for attr in dir(g) if not attr.startswith('__')) >= expected
+    True
+
+    >>> print g.next.__doc__
+    x.next() -> the next value, or raise StopIteration
+    >>> import types
+    >>> isinstance(g, types.GeneratorType)
+    True
+
+Check the __iter__ slot is defined to return self
+
+    >>> iter(g) is g
+    True
+
+Verify that the running flag is set properly
+
+    >>> g = (me.gi_running for i in (0,1))
+    >>> me = g
+    >>> me.gi_running
+    0
+    >>> me.next()
+    1
+    >>> me.gi_running
+    0
+
+Verify that genexps are weakly referenceable
+
+    >>> import weakref
+    >>> g = (i*i for i in range(4))
+    >>> wr = weakref.ref(g)
+    >>> wr() is g
+    True
+    >>> p = weakref.proxy(g)
+    >>> list(p)
+    [0, 1, 4, 9]
+
+
+"""
+
+
+__test__ = {'doctests' : doctests}
+
+def test_main(verbose=None):
+    import sys
+    from test import test_support
+    from test import test_genexps
+    test_support.run_doctest(test_genexps, verbose)
+
+    # verify reference counting
+    if verbose and hasattr(sys, "gettotalrefcount"):
+        import gc
+        counts = [None] * 5
+        for i in xrange(len(counts)):
+            test_support.run_doctest(test_genexps, verbose)
+            gc.collect()
+            counts[i] = sys.gettotalrefcount()
+        print counts
+
+if __name__ == "__main__":
+    test_main(verbose=True)

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_iter.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_iter.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,889 @@
+# Test iterators.
+
+import gc
+import unittest
+from test.test_support import run_unittest, TESTFN, unlink, have_unicode
+
+# Test result of triple loop (too big to inline)
+TRIPLETS = [(0, 0, 0), (0, 0, 1), (0, 0, 2),
+            (0, 1, 0), (0, 1, 1), (0, 1, 2),
+            (0, 2, 0), (0, 2, 1), (0, 2, 2),
+
+            (1, 0, 0), (1, 0, 1), (1, 0, 2),
+            (1, 1, 0), (1, 1, 1), (1, 1, 2),
+            (1, 2, 0), (1, 2, 1), (1, 2, 2),
+
+            (2, 0, 0), (2, 0, 1), (2, 0, 2),
+            (2, 1, 0), (2, 1, 1), (2, 1, 2),
+            (2, 2, 0), (2, 2, 1), (2, 2, 2)]
+
+# Helper classes
+
+class BasicIterClass:
+    def __init__(self, n):
+        self.n = n
+        self.i = 0
+    def next(self):
+        res = self.i
+        if res >= self.n:
+            raise StopIteration
+        self.i = res + 1
+        return res
+
+class IteratingSequenceClass:
+    def __init__(self, n):
+        self.n = n
+    def __iter__(self):
+        return BasicIterClass(self.n)
+
+class SequenceClass:
+    def __init__(self, n):
+        self.n = n
+    def __getitem__(self, i):
+        if 0 <= i < self.n:
+            return i
+        else:
+            raise IndexError
+
+# Main test suite
+
+class TestCase(unittest.TestCase):
+
+    # Helper to check that an iterator returns a given sequence
+    def check_iterator(self, it, seq):
+        res = []
+        while 1:
+            try:
+                val = it.next()
+            except StopIteration:
+                break
+            res.append(val)
+        self.assertEqual(res, seq)
+
+    # Helper to check that a for loop generates a given sequence
+    def check_for_loop(self, expr, seq):
+        res = []
+        for val in expr:
+            res.append(val)
+        self.assertEqual(res, seq)
+
+    # Test basic use of iter() function
+    def test_iter_basic(self):
+        self.check_iterator(iter(range(10)), range(10))
+
+    # Test that iter(iter(x)) is the same as iter(x)
+    def test_iter_idempotency(self):
+        seq = range(10)
+        it = iter(seq)
+        it2 = iter(it)
+        self.assert_(it is it2)
+
+    # Test that for loops over iterators work
+    def test_iter_for_loop(self):
+        self.check_for_loop(iter(range(10)), range(10))
+
+    # Test several independent iterators over the same list
+    def test_iter_independence(self):
+        seq = range(3)
+        res = []
+        for i in iter(seq):
+            for j in iter(seq):
+                for k in iter(seq):
+                    res.append((i, j, k))
+        self.assertEqual(res, TRIPLETS)
+
+    # Test triple list comprehension using iterators
+    def test_nested_comprehensions_iter(self):
+        seq = range(3)
+        res = [(i, j, k)
+               for i in iter(seq) for j in iter(seq) for k in iter(seq)]
+        self.assertEqual(res, TRIPLETS)
+
+    # Test triple list comprehension without iterators
+    def test_nested_comprehensions_for(self):
+        seq = range(3)
+        res = [(i, j, k) for i in seq for j in seq for k in seq]
+        self.assertEqual(res, TRIPLETS)
+
+    # Test a class with __iter__ in a for loop
+    def test_iter_class_for(self):
+        self.check_for_loop(IteratingSequenceClass(10), range(10))
+
+    # Test a class with __iter__ with explicit iter()
+    def test_iter_class_iter(self):
+        self.check_iterator(iter(IteratingSequenceClass(10)), range(10))
+
+    # Test for loop on a sequence class without __iter__
+    def test_seq_class_for(self):
+        self.check_for_loop(SequenceClass(10), range(10))
+
+    # Test iter() on a sequence class without __iter__
+    def test_seq_class_iter(self):
+        self.check_iterator(iter(SequenceClass(10)), range(10))
+
+    # Test two-argument iter() with callable instance
+    def test_iter_callable(self):
+        class C:
+            def __init__(self):
+                self.i = 0
+            def __call__(self):
+                i = self.i
+                self.i = i + 1
+                if i > 100:
+                    raise IndexError # Emergency stop
+                return i
+        self.check_iterator(iter(C(), 10), range(10))
+
+    # Test two-argument iter() with function
+    def test_iter_function(self):
+        def spam(state=[0]):
+            i = state[0]
+            state[0] = i+1
+            return i
+        self.check_iterator(iter(spam, 10), range(10))
+
+    # Test two-argument iter() with function that raises StopIteration
+    def test_iter_function_stop(self):
+        def spam(state=[0]):
+            i = state[0]
+            if i == 10:
+                raise StopIteration
+            state[0] = i+1
+            return i
+        self.check_iterator(iter(spam, 20), range(10))
+
+    # Test exception propagation through function iterator
+    def test_exception_function(self):
+        def spam(state=[0]):
+            i = state[0]
+            state[0] = i+1
+            if i == 10:
+                raise RuntimeError
+            return i
+        res = []
+        try:
+            for x in iter(spam, 20):
+                res.append(x)
+        except RuntimeError:
+            self.assertEqual(res, range(10))
+        else:
+            self.fail("should have raised RuntimeError")
+
+    # Test exception propagation through sequence iterator
+    def test_exception_sequence(self):
+        class MySequenceClass(SequenceClass):
+            def __getitem__(self, i):
+                if i == 10:
+                    raise RuntimeError
+                return SequenceClass.__getitem__(self, i)
+        res = []
+        try:
+            for x in MySequenceClass(20):
+                res.append(x)
+        except RuntimeError:
+            self.assertEqual(res, range(10))
+        else:
+            self.fail("should have raised RuntimeError")
+
+    # Test for StopIteration from __getitem__
+    def test_stop_sequence(self):
+        class MySequenceClass(SequenceClass):
+            def __getitem__(self, i):
+                if i == 10:
+                    raise StopIteration
+                return SequenceClass.__getitem__(self, i)
+        self.check_for_loop(MySequenceClass(20), range(10))
+
+    # Test a big range
+    def test_iter_big_range(self):
+        self.check_for_loop(iter(range(10000)), range(10000))
+
+    # Test an empty list
+    def test_iter_empty(self):
+        self.check_for_loop(iter([]), [])
+
+    # Test a tuple
+    def test_iter_tuple(self):
+        self.check_for_loop(iter((0,1,2,3,4,5,6,7,8,9)), range(10))
+
+    # Test an xrange
+    def test_iter_xrange(self):
+        self.check_for_loop(iter(xrange(10)), range(10))
+
+    # Test a string
+    def test_iter_string(self):
+        self.check_for_loop(iter("abcde"), ["a", "b", "c", "d", "e"])
+
+    # Test a Unicode string
+    if have_unicode:
+        def test_iter_unicode(self):
+            self.check_for_loop(iter(unicode("abcde")),
+                                [unicode("a"), unicode("b"), unicode("c"),
+                                 unicode("d"), unicode("e")])
+
+    # Test a dictionary
+    def test_iter_dict(self):
+        dict = {}
+        for i in range(10):
+            dict[i] = None
+        self.check_for_loop(dict, dict.keys())
+
+    # Test a file
+    def test_iter_file(self):
+        f = open(TESTFN, "w")
+        try:
+            for i in range(5):
+                f.write("%d\n" % i)
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"])
+            self.check_for_loop(f, [])
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test list()'s use of iterators.
+    def test_builtin_list(self):
+        self.assertEqual(list(SequenceClass(5)), range(5))
+        self.assertEqual(list(SequenceClass(0)), [])
+        self.assertEqual(list(()), [])
+        self.assertEqual(list(range(10, -1, -1)), range(10, -1, -1))
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(list(d), d.keys())
+
+        self.assertRaises(TypeError, list, list)
+        self.assertRaises(TypeError, list, 42)
+
+        f = open(TESTFN, "w")
+        try:
+            for i in range(5):
+                f.write("%d\n" % i)
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(list(f), ["0\n", "1\n", "2\n", "3\n", "4\n"])
+            f.seek(0, 0)
+            self.assertEqual(list(f),
+                             ["0\n", "1\n", "2\n", "3\n", "4\n"])
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test tuple()'s use of iterators.
+    def test_builtin_tuple(self):
+        self.assertEqual(tuple(SequenceClass(5)), (0, 1, 2, 3, 4))
+        self.assertEqual(tuple(SequenceClass(0)), ())
+        self.assertEqual(tuple([]), ())
+        self.assertEqual(tuple(()), ())
+        self.assertEqual(tuple("abc"), ("a", "b", "c"))
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(tuple(d), tuple(d.keys()))
+
+        self.assertRaises(TypeError, tuple, list)
+        self.assertRaises(TypeError, tuple, 42)
+
+        f = open(TESTFN, "w")
+        try:
+            for i in range(5):
+                f.write("%d\n" % i)
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(tuple(f), ("0\n", "1\n", "2\n", "3\n", "4\n"))
+            f.seek(0, 0)
+            self.assertEqual(tuple(f),
+                             ("0\n", "1\n", "2\n", "3\n", "4\n"))
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test filter()'s use of iterators.
+    def test_builtin_filter(self):
+        self.assertEqual(filter(None, SequenceClass(5)), range(1, 5))
+        self.assertEqual(filter(None, SequenceClass(0)), [])
+        self.assertEqual(filter(None, ()), ())
+        self.assertEqual(filter(None, "abc"), "abc")
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(filter(None, d), d.keys())
+
+        self.assertRaises(TypeError, filter, None, list)
+        self.assertRaises(TypeError, filter, None, 42)
+
+        class Boolean:
+            def __init__(self, truth):
+                self.truth = truth
+            def __nonzero__(self):
+                return self.truth
+        bTrue = Boolean(1)
+        bFalse = Boolean(0)
+
+        class Seq:
+            def __init__(self, *args):
+                self.vals = args
+            def __iter__(self):
+                class SeqIter:
+                    def __init__(self, vals):
+                        self.vals = vals
+                        self.i = 0
+                    def __iter__(self):
+                        return self
+                    def next(self):
+                        i = self.i
+                        self.i = i + 1
+                        if i < len(self.vals):
+                            return self.vals[i]
+                        else:
+                            raise StopIteration
+                return SeqIter(self.vals)
+
+        seq = Seq(*([bTrue, bFalse] * 25))
+        self.assertEqual(filter(lambda x: not x, seq), [bFalse]*25)
+        self.assertEqual(filter(lambda x: not x, iter(seq)), [bFalse]*25)
+
+    # Test max() and min()'s use of iterators.
+    def test_builtin_max_min(self):
+        self.assertEqual(max(SequenceClass(5)), 4)
+        self.assertEqual(min(SequenceClass(5)), 0)
+        self.assertEqual(max(8, -1), 8)
+        self.assertEqual(min(8, -1), -1)
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(max(d), "two")
+        self.assertEqual(min(d), "one")
+        self.assertEqual(max(d.itervalues()), 3)
+        self.assertEqual(min(iter(d.itervalues())), 1)
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("medium line\n")
+            f.write("xtra large line\n")
+            f.write("itty-bitty line\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(min(f), "itty-bitty line\n")
+            f.seek(0, 0)
+            self.assertEqual(max(f), "xtra large line\n")
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test map()'s use of iterators.
+    def test_builtin_map(self):
+        self.assertEqual(map(None, SequenceClass(5)), range(5))
+        self.assertEqual(map(lambda x: x+1, SequenceClass(5)), range(1, 6))
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(map(None, d), d.keys())
+        self.assertEqual(map(lambda k, d=d: (k, d[k]), d), d.items())
+        dkeys = d.keys()
+        expected = [(i < len(d) and dkeys[i] or None,
+                     i,
+                     i < len(d) and dkeys[i] or None)
+                    for i in range(5)]
+        self.assertEqual(map(None, d,
+                                   SequenceClass(5),
+                                   iter(d.iterkeys())),
+                         expected)
+
+        f = open(TESTFN, "w")
+        try:
+            for i in range(10):
+                f.write("xy" * i + "\n") # line i has len 2*i+1
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(map(len, f), range(1, 21, 2))
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test zip()'s use of iterators.
+    def test_builtin_zip(self):
+        self.assertEqual(zip(), [])
+        self.assertEqual(zip(*[]), [])
+        self.assertEqual(zip(*[(1, 2), 'ab']), [(1, 'a'), (2, 'b')])
+
+        self.assertRaises(TypeError, zip, None)
+        self.assertRaises(TypeError, zip, range(10), 42)
+        self.assertRaises(TypeError, zip, range(10), zip)
+
+        self.assertEqual(zip(IteratingSequenceClass(3)),
+                         [(0,), (1,), (2,)])
+        self.assertEqual(zip(SequenceClass(3)),
+                         [(0,), (1,), (2,)])
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(d.items(), zip(d, d.itervalues()))
+
+        # Generate all ints starting at constructor arg.
+        class IntsFrom:
+            def __init__(self, start):
+                self.i = start
+
+            def __iter__(self):
+                return self
+
+            def next(self):
+                i = self.i
+                self.i = i+1
+                return i
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" "bbb\n" "cc\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(zip(IntsFrom(0), f, IntsFrom(-100)),
+                             [(0, "a\n", -100),
+                              (1, "bbb\n", -99),
+                              (2, "cc\n", -98)])
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+        self.assertEqual(zip(xrange(5)), [(i,) for i in range(5)])
+
+        # Classes that lie about their lengths.
+        class NoGuessLen5:
+            def __getitem__(self, i):
+                if i >= 5:
+                    raise IndexError
+                return i
+
+        class Guess3Len5(NoGuessLen5):
+            def __len__(self):
+                return 3
+
+        class Guess30Len5(NoGuessLen5):
+            def __len__(self):
+                return 30
+
+        self.assertEqual(len(Guess3Len5()), 3)
+        self.assertEqual(len(Guess30Len5()), 30)
+        self.assertEqual(zip(NoGuessLen5()), zip(range(5)))
+        self.assertEqual(zip(Guess3Len5()), zip(range(5)))
+        self.assertEqual(zip(Guess30Len5()), zip(range(5)))
+
+        expected = [(i, i) for i in range(5)]
+        for x in NoGuessLen5(), Guess3Len5(), Guess30Len5():
+            for y in NoGuessLen5(), Guess3Len5(), Guess30Len5():
+                self.assertEqual(zip(x, y), expected)
+
+    # Test reduce()'s use of iterators.
+    def test_builtin_reduce(self):
+        from operator import add
+        self.assertEqual(reduce(add, SequenceClass(5)), 10)
+        self.assertEqual(reduce(add, SequenceClass(5), 42), 52)
+        self.assertRaises(TypeError, reduce, add, SequenceClass(0))
+        self.assertEqual(reduce(add, SequenceClass(0), 42), 42)
+        self.assertEqual(reduce(add, SequenceClass(1)), 0)
+        self.assertEqual(reduce(add, SequenceClass(1), 42), 42)
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(reduce(add, d), "".join(d.keys()))
+
+    # This test case will be removed if we don't have Unicode
+    def test_unicode_join_endcase(self):
+
+        # This class inserts a Unicode object into its argument's natural
+        # iteration, in the 3rd position.
+        class OhPhooey:
+            def __init__(self, seq):
+                self.it = iter(seq)
+                self.i = 0
+
+            def __iter__(self):
+                return self
+
+            def next(self):
+                i = self.i
+                self.i = i+1
+                if i == 2:
+                    return unicode("fooled you!")
+                return self.it.next()
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" + "b\n" + "c\n")
+        finally:
+            f.close()
+
+        f = open(TESTFN, "r")
+        # Nasty:  string.join(s) can't know whether unicode.join() is needed
+        # until it's seen all of s's elements.  But in this case, f's
+        # iterator cannot be restarted.  So what we're testing here is
+        # whether string.join() can manage to remember everything it's seen
+        # and pass that on to unicode.join().
+        try:
+            got = " - ".join(OhPhooey(f))
+            self.assertEqual(got, unicode("a\n - b\n - fooled you! - c\n"))
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+    if not have_unicode:
+        def test_unicode_join_endcase(self): pass
+
+    # Test iterators with 'x in y' and 'x not in y'.
+    def test_in_and_not_in(self):
+        for sc5 in IteratingSequenceClass(5), SequenceClass(5):
+            for i in range(5):
+                self.assert_(i in sc5)
+            for i in "abc", -1, 5, 42.42, (3, 4), [], {1: 1}, 3-12j, sc5:
+                self.assert_(i not in sc5)
+
+        self.assertRaises(TypeError, lambda: 3 in 12)
+        self.assertRaises(TypeError, lambda: 3 not in map)
+
+        d = {"one": 1, "two": 2, "three": 3, 1j: 2j}
+        for k in d:
+            self.assert_(k in d)
+            self.assert_(k not in d.itervalues())
+        for v in d.values():
+            self.assert_(v in d.itervalues())
+            self.assert_(v not in d)
+        for k, v in d.iteritems():
+            self.assert_((k, v) in d.iteritems())
+            self.assert_((v, k) not in d.iteritems())
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" "b\n" "c\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            for chunk in "abc":
+                f.seek(0, 0)
+                self.assert_(chunk not in f)
+                f.seek(0, 0)
+                self.assert_((chunk + "\n") in f)
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test iterators with operator.countOf (PySequence_Count).
+    def test_countOf(self):
+        from operator import countOf
+        self.assertEqual(countOf([1,2,2,3,2,5], 2), 3)
+        self.assertEqual(countOf((1,2,2,3,2,5), 2), 3)
+        self.assertEqual(countOf("122325", "2"), 3)
+        self.assertEqual(countOf("122325", "6"), 0)
+
+        self.assertRaises(TypeError, countOf, 42, 1)
+        self.assertRaises(TypeError, countOf, countOf, countOf)
+
+        d = {"one": 3, "two": 3, "three": 3, 1j: 2j}
+        for k in d:
+            self.assertEqual(countOf(d, k), 1)
+        self.assertEqual(countOf(d.itervalues(), 3), 3)
+        self.assertEqual(countOf(d.itervalues(), 2j), 1)
+        self.assertEqual(countOf(d.itervalues(), 1j), 0)
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" "b\n" "c\n" "b\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            for letter, count in ("a", 1), ("b", 2), ("c", 1), ("d", 0):
+                f.seek(0, 0)
+                self.assertEqual(countOf(f, letter + "\n"), count)
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test iterators with operator.indexOf (PySequence_Index).
+    def test_indexOf(self):
+        from operator import indexOf
+        self.assertEqual(indexOf([1,2,2,3,2,5], 1), 0)
+        self.assertEqual(indexOf((1,2,2,3,2,5), 2), 1)
+        self.assertEqual(indexOf((1,2,2,3,2,5), 3), 3)
+        self.assertEqual(indexOf((1,2,2,3,2,5), 5), 5)
+        self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 0)
+        self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 6)
+
+        self.assertEqual(indexOf("122325", "2"), 1)
+        self.assertEqual(indexOf("122325", "5"), 5)
+        self.assertRaises(ValueError, indexOf, "122325", "6")
+
+        self.assertRaises(TypeError, indexOf, 42, 1)
+        self.assertRaises(TypeError, indexOf, indexOf, indexOf)
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" "b\n" "c\n" "d\n" "e\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            fiter = iter(f)
+            self.assertEqual(indexOf(fiter, "b\n"), 1)
+            self.assertEqual(indexOf(fiter, "d\n"), 1)
+            self.assertEqual(indexOf(fiter, "e\n"), 0)
+            self.assertRaises(ValueError, indexOf, fiter, "a\n")
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+        iclass = IteratingSequenceClass(3)
+        for i in range(3):
+            self.assertEqual(indexOf(iclass, i), i)
+        self.assertRaises(ValueError, indexOf, iclass, -1)
+
+    # Test iterators with file.writelines().
+    def test_writelines(self):
+        f = file(TESTFN, "w")
+
+        try:
+            self.assertRaises(TypeError, f.writelines, None)
+            self.assertRaises(TypeError, f.writelines, 42)
+
+            f.writelines(["1\n", "2\n"])
+            f.writelines(("3\n", "4\n"))
+            f.writelines({'5\n': None})
+            f.writelines({})
+
+            # Try a big chunk too.
+            class Iterator:
+                def __init__(self, start, finish):
+                    self.start = start
+                    self.finish = finish
+                    self.i = self.start
+
+                def next(self):
+                    if self.i >= self.finish:
+                        raise StopIteration
+                    result = str(self.i) + '\n'
+                    self.i += 1
+                    return result
+
+                def __iter__(self):
+                    return self
+
+            class Whatever:
+                def __init__(self, start, finish):
+                    self.start = start
+                    self.finish = finish
+
+                def __iter__(self):
+                    return Iterator(self.start, self.finish)
+
+            f.writelines(Whatever(6, 6+2000))
+            f.close()
+
+            f = file(TESTFN)
+            expected = [str(i) + "\n" for i in range(1, 2006)]
+            self.assertEqual(list(f), expected)
+
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+
+    # Test iterators on RHS of unpacking assignments.
+    def test_unpack_iter(self):
+        a, b = 1, 2
+        self.assertEqual((a, b), (1, 2))
+
+        a, b, c = IteratingSequenceClass(3)
+        self.assertEqual((a, b, c), (0, 1, 2))
+
+        try:    # too many values
+            a, b = IteratingSequenceClass(3)
+        except ValueError:
+            pass
+        else:
+            self.fail("should have raised ValueError")
+
+        try:    # not enough values
+            a, b, c = IteratingSequenceClass(2)
+        except ValueError:
+            pass
+        else:
+            self.fail("should have raised ValueError")
+
+        try:    # not iterable
+            a, b, c = len
+        except TypeError:
+            pass
+        else:
+            self.fail("should have raised TypeError")
+
+        a, b, c = {1: 42, 2: 42, 3: 42}.itervalues()
+        self.assertEqual((a, b, c), (42, 42, 42))
+
+        f = open(TESTFN, "w")
+        lines = ("a\n", "bb\n", "ccc\n")
+        try:
+            for line in lines:
+                f.write(line)
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            a, b, c = f
+            self.assertEqual((a, b, c), lines)
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+        (a, b), (c,) = IteratingSequenceClass(2), {42: 24}
+        self.assertEqual((a, b, c), (0, 1, 42))
+
+        # Test reference count behavior
+
+        class C(object):
+            count = 0
+            def __new__(cls):
+                cls.count += 1
+                return object.__new__(cls)
+            def __del__(self):
+                cls = self.__class__
+                assert cls.count > 0
+                cls.count -= 1
+        x = C()
+        self.assertEqual(C.count, 1)
+        del x
+        gc.collect()
+        self.assertEqual(C.count, 0)
+        l = [C(), C(), C()]
+        self.assertEqual(C.count, 3)
+        try:
+            a, b = iter(l)
+        except ValueError:
+            pass
+        del l
+        gc.collect()
+        self.assertEqual(C.count, 0)
+
+
+    # Make sure StopIteration is a "sink state".
+    # This tests various things that weren't sink states in Python 2.2.1,
+    # plus various things that always were fine.
+
+    def test_sinkstate_list(self):
+        # This used to fail
+        a = range(5)
+        b = iter(a)
+        self.assertEqual(list(b), range(5))
+        a.extend(range(5, 10))
+        self.assertEqual(list(b), [])
+
+    def test_sinkstate_tuple(self):
+        a = (0, 1, 2, 3, 4)
+        b = iter(a)
+        self.assertEqual(list(b), range(5))
+        self.assertEqual(list(b), [])
+
+    def test_sinkstate_string(self):
+        a = "abcde"
+        b = iter(a)
+        self.assertEqual(list(b), ['a', 'b', 'c', 'd', 'e'])
+        self.assertEqual(list(b), [])
+
+    def test_sinkstate_sequence(self):
+        # This used to fail
+        a = SequenceClass(5)
+        b = iter(a)
+        self.assertEqual(list(b), range(5))
+        a.n = 10
+        self.assertEqual(list(b), [])
+
+    def test_sinkstate_callable(self):
+        # This used to fail
+        def spam(state=[0]):
+            i = state[0]
+            state[0] = i+1
+            if i == 10:
+                raise AssertionError, "shouldn't have gotten this far"
+            return i
+        b = iter(spam, 5)
+        self.assertEqual(list(b), range(5))
+        self.assertEqual(list(b), [])
+
+    def test_sinkstate_dict(self):
+        # XXX For a more thorough test, see towards the end of:
+        # http://mail.python.org/pipermail/python-dev/2002-July/026512.html
+        a = {1:1, 2:2, 0:0, 4:4, 3:3}
+        for b in iter(a), a.iterkeys(), a.iteritems(), a.itervalues():
+            b = iter(a)
+            self.assertEqual(len(list(b)), 5)
+            self.assertEqual(list(b), [])
+
+    def test_sinkstate_yield(self):
+        def gen():
+            for i in range(5):
+                yield i
+        b = gen()
+        self.assertEqual(list(b), range(5))
+        self.assertEqual(list(b), [])
+
+    def test_sinkstate_range(self):
+        a = xrange(5)
+        b = iter(a)
+        self.assertEqual(list(b), range(5))
+        self.assertEqual(list(b), [])
+
+    def test_sinkstate_enumerate(self):
+        a = range(5)
+        e = enumerate(a)
+        b = iter(e)
+        self.assertEqual(list(b), zip(range(5), range(5)))
+        self.assertEqual(list(b), [])
+
+
+def test_main():
+    run_unittest(TestCase)
+
+
+if __name__ == "__main__":
+    test_main()

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_itertools.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_itertools.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,975 @@
+import unittest
+from test import test_support
+from itertools import *
+from weakref import proxy
+import sys
+import operator
+import random
+
def onearg(x):
    'Helper accepting exactly one argument; returns its double.'
    return x * 2
+
def errfunc(*args):
    'Helper that unconditionally raises ValueError, whatever it is given.'
    raise ValueError()
+
def gen3():
    'One-shot generator yielding 0, 1, 2; it cannot be restarted.'
    for value in range(3):
        yield value
+
def isEven(x):
    'Predicate: true when x is divisible by two.'
    return not x % 2
+
def isOdd(x):
    'Predicate: true when x is not divisible by two.'
    return x % 2 != 0
+
+class StopNow:
+    'Class emulating an empty iterable.'
+    def __iter__(self):
+        return self
+    def next(self):
+        raise StopIteration
+
def take(n, seq):
    'Return the first n items of seq as a list (safe for long or infinite iterables).'
    return list(islice(seq, n))
+
class TestBasicOps(unittest.TestCase):
    """Exercise each itertools entry point of the 2.5-era API (ifilter,
    imap, izip, ...).  Several assertions pin implementation details;
    comments mentioning pypy mark deviations from CPython's version of
    this test."""

    def test_chain(self):
        self.assertEqual(list(chain('abc', 'def')), list('abcdef'))
        self.assertEqual(list(chain('abc')), list('abc'))
        self.assertEqual(list(chain('')), [])
        self.assertEqual(take(4, chain('abc', 'def')), list('abcd'))
        self.assertRaises(TypeError, chain, 2, 3)

    def test_count(self):
        self.assertEqual(zip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
        self.assertEqual(zip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
        self.assertEqual(take(2, zip('abc',count(3))), [('a', 3), ('b', 4)])
        self.assertRaises(TypeError, count, 2, 3)
        self.assertRaises(TypeError, count, 'a')
        self.assertRaises(OverflowError, list, islice(count(sys.maxint-5), 10))
        # repr() must track the counter's current value as it advances.
        c = count(3)
        self.assertEqual(repr(c), 'count(3)')
        c.next()
        self.assertEqual(repr(c), 'count(4)')
        c = count(-9)
        self.assertEqual(repr(c), 'count(-9)')
        c.next()
        self.assertEqual(c.next(), -8)

    def test_cycle(self):
        self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
        self.assertEqual(list(cycle('')), [])
        self.assertRaises(TypeError, cycle)
        self.assertRaises(TypeError, cycle, 5)
        self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])

    def test_groupby(self):
        # Check whether it accepts arguments correctly
        self.assertEqual([], list(groupby([])))
        self.assertEqual([], list(groupby([], key=id)))
        self.assertRaises(TypeError, list, groupby('abc', []))
        self.assertRaises(TypeError, groupby, None)
        self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)

        # Check normal input
        s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
             (2,15,22), (3,16,23), (3,17,23)]
        dup = []
        for k, g in groupby(s, lambda r:r[0]):
            for elem in g:
                self.assertEqual(k, elem[0])
                dup.append(elem)
        self.assertEqual(s, dup)

        # Check nested case
        dup = []
        for k, g in groupby(s, lambda r:r[0]):
            for ik, ig in groupby(g, lambda r:r[2]):
                for elem in ig:
                    self.assertEqual(k, elem[0])
                    self.assertEqual(ik, elem[2])
                    dup.append(elem)
        self.assertEqual(s, dup)

        # Check case where inner iterator is not used
        keys = [k for k, g in groupby(s, lambda r:r[0])]
        expectedkeys = set([r[0] for r in s])
        self.assertEqual(set(keys), expectedkeys)
        self.assertEqual(len(keys), len(expectedkeys))

        # Exercise pipes and filters style
        s = 'abracadabra'
        # sort s | uniq
        r = [k for k, g in groupby(sorted(s))]
        self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
        # sort s | uniq -d
        r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
        self.assertEqual(r, ['a', 'b', 'r'])
        # sort s | uniq -c
        r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
        self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
        # sort s | uniq -c | sort -rn | head -3
        r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
        self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])

        # iter.next failure
        class ExpectedError(Exception):
            pass
        def delayed_raise(n=0):
            for i in range(n):
                yield 'yo'
            raise ExpectedError
        def gulp(iterable, keyp=None, func=list):
            return [func(g) for k, g in groupby(iterable, keyp)]

        # iter.next failure on outer object
        self.assertRaises(ExpectedError, gulp, delayed_raise(0))
        # iter.next failure on inner object
        self.assertRaises(ExpectedError, gulp, delayed_raise(1))

        # __cmp__ failure
        class DummyCmp:
            def __cmp__(self, dst):
                raise ExpectedError
        s = [DummyCmp(), DummyCmp(), None]

        # __cmp__ failure on outer object
        self.assertRaises(ExpectedError, gulp, s, func=id)
        # __cmp__ failure on inner object
        self.assertRaises(ExpectedError, gulp, s)

        # keyfunc failure
        def keyfunc(obj):
            if keyfunc.skip > 0:
                keyfunc.skip -= 1
                return obj
            else:
                raise ExpectedError

        # keyfunc failure on outer object
        keyfunc.skip = 0
        self.assertRaises(ExpectedError, gulp, [None], keyfunc)
        keyfunc.skip = 1
        self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)

    def test_ifilter(self):
        self.assertEqual(list(ifilter(isEven, range(6))), [0,2,4])
        self.assertEqual(list(ifilter(None, [0,1,0,2,0])), [1,2])
        self.assertEqual(take(4, ifilter(isEven, count())), [0,2,4,6])
        self.assertRaises(TypeError, ifilter)
        self.assertRaises(TypeError, ifilter, lambda x:x)
        self.assertRaises(TypeError, ifilter, lambda x:x, range(6), 7)
        self.assertRaises(TypeError, ifilter, isEven, 3)
        self.assertRaises(TypeError, ifilter(range(6), range(6)).next)

    def test_ifilterfalse(self):
        self.assertEqual(list(ifilterfalse(isEven, range(6))), [1,3,5])
        self.assertEqual(list(ifilterfalse(None, [0,1,0,2,0])), [0,0,0])
        self.assertEqual(take(4, ifilterfalse(isEven, count())), [1,3,5,7])
        self.assertRaises(TypeError, ifilterfalse)
        self.assertRaises(TypeError, ifilterfalse, lambda x:x)
        self.assertRaises(TypeError, ifilterfalse, lambda x:x, range(6), 7)
        self.assertRaises(TypeError, ifilterfalse, isEven, 3)
        self.assertRaises(TypeError, ifilterfalse(range(6), range(6)).next)

    def test_izip(self):
        ans = [(x,y) for x, y in izip('abc',count())]
        self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
        self.assertEqual(list(izip('abc', range(6))), zip('abc', range(6)))
        self.assertEqual(list(izip('abcdef', range(3))), zip('abcdef', range(3)))
        self.assertEqual(take(3,izip('abcdef', count())), zip('abcdef', range(3)))
        self.assertEqual(list(izip('abcdef')), zip('abcdef'))
        self.assertEqual(list(izip()), zip())
        self.assertRaises(TypeError, izip, 3)
        self.assertRaises(TypeError, izip, range(3), 3)
        # Check tuple re-use (implementation detail)
        self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')],
                         zip('abc', 'def'))
        self.assertEqual([pair for pair in izip('abc', 'def')],
                         zip('abc', 'def'))
        # the following test deals with a specific implementation detail,
        # that izip "reuses" the SAME tuple object each time when it can;
        # it does not apply correctly to pypy, so I'm commenting it -- AM
        # ids = map(id, izip('abc', 'def'))
        # self.assertEqual(min(ids), max(ids))
        ids = map(id, list(izip('abc', 'def')))
        self.assertEqual(len(dict.fromkeys(ids)), len(ids))

    def test_repeat(self):
        self.assertEqual(zip(xrange(3),repeat('a')),
                         [(0, 'a'), (1, 'a'), (2, 'a')])
        self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
        self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
        self.assertEqual(list(repeat('a', 0)), [])
        self.assertEqual(list(repeat('a', -3)), [])
        self.assertRaises(TypeError, repeat)
        self.assertRaises(TypeError, repeat, None, 3, 4)
        self.assertRaises(TypeError, repeat, None, 'a')
        # repr() of a bounded repeat must count down as items are consumed.
        r = repeat(1+0j)
        self.assertEqual(repr(r), 'repeat((1+0j))')
        r = repeat(1+0j, 5)
        self.assertEqual(repr(r), 'repeat((1+0j), 5)')
        list(r)
        self.assertEqual(repr(r), 'repeat((1+0j), 0)')

    def test_imap(self):
        self.assertEqual(list(imap(operator.pow, range(3), range(1,7))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(list(imap(None, 'abc', range(5))),
                         [('a',0),('b',1),('c',2)])
        self.assertEqual(list(imap(None, 'abc', count())),
                         [('a',0),('b',1),('c',2)])
        self.assertEqual(take(2,imap(None, 'abc', count())),
                         [('a',0),('b',1)])
        self.assertEqual(list(imap(operator.pow, [])), [])
        self.assertRaises(TypeError, imap)
        self.assertRaises(TypeError, imap, operator.neg)
        self.assertRaises(TypeError, imap(10, range(5)).next)
        self.assertRaises(ValueError, imap(errfunc, [4], [5]).next)
        self.assertRaises(TypeError, imap(onearg, [4], [5]).next)

    def test_starmap(self):
        self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(take(3, starmap(operator.pow, izip(count(), count(1)))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(list(starmap(operator.pow, [])), [])
        self.assertRaises(TypeError, list, starmap(operator.pow, [[4,5]]))
        self.assertRaises(TypeError, starmap)
        self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
        self.assertRaises(TypeError, starmap(10, [(4,5)]).next)
        self.assertRaises(ValueError, starmap(errfunc, [(4,5)]).next)
        self.assertRaises(TypeError, starmap(onearg, [(4,5)]).next)

    def test_islice(self):
        for args in [          # islice(args) should agree with range(args)
                (10, 20, 3),
                (10, 3, 20),
                (10, 20),
                (10, 3),
                (20,)
                ]:
            self.assertEqual(list(islice(xrange(100), *args)), range(*args))

        for args, tgtargs in [  # Stop when seqn is exhausted
                ((10, 110, 3), ((10, 100, 3))),
                ((10, 110), ((10, 100))),
                ((110,), (100,))
                ]:
            self.assertEqual(list(islice(xrange(100), *args)), range(*tgtargs))

        # Test stop=None
        self.assertEqual(list(islice(xrange(10), None)), range(10))
        self.assertEqual(list(islice(xrange(10), None, None)), range(10))
        self.assertEqual(list(islice(xrange(10), None, None, None)), range(10))
        self.assertEqual(list(islice(xrange(10), 2, None)), range(2, 10))
        self.assertEqual(list(islice(xrange(10), 1, None, 2)), range(1, 10, 2))

        # Test number of items consumed     SF #1171417
        it = iter(range(10))
        self.assertEqual(list(islice(it, 3)), range(3))
        self.assertEqual(list(it), range(3, 10))

        # Test invalid arguments
        self.assertRaises(TypeError, islice, xrange(10))
        self.assertRaises(TypeError, islice, xrange(10), 1, 2, 3, 4)
        self.assertRaises(ValueError, islice, xrange(10), -5, 10, 1)
        self.assertRaises(ValueError, islice, xrange(10), 1, -5, -1)
        self.assertRaises(ValueError, islice, xrange(10), 1, 10, -1)
        self.assertRaises(ValueError, islice, xrange(10), 1, 10, 0)
        self.assertRaises(ValueError, islice, xrange(10), 'a')
        self.assertRaises(ValueError, islice, xrange(10), 'a', 1)
        self.assertRaises(ValueError, islice, xrange(10), 1, 'a')
        self.assertRaises(ValueError, islice, xrange(10), 'a', 1, 1)
        self.assertRaises(ValueError, islice, xrange(10), 1, 'a', 1)
        # too slow to test on pypy, weakened...:
        # self.assertEqual(len(list(islice(count(), 1, 10, sys.maxint))), 1)
        self.assertEqual(len(list(islice(count(), 1, 10, 99))), 1)

    def test_takewhile(self):
        data = [1, 3, 5, 20, 2, 4, 6, 8]
        underten = lambda x: x<10
        self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
        self.assertEqual(list(takewhile(underten, [])), [])
        self.assertRaises(TypeError, takewhile)
        self.assertRaises(TypeError, takewhile, operator.pow)
        self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
        self.assertRaises(TypeError, takewhile(10, [(4,5)]).next)
        self.assertRaises(ValueError, takewhile(errfunc, [(4,5)]).next)
        t = takewhile(bool, [1, 1, 1, 0, 0, 0])
        self.assertEqual(list(t), [1, 1, 1])
        self.assertRaises(StopIteration, t.next)

    def test_dropwhile(self):
        data = [1, 3, 5, 20, 2, 4, 6, 8]
        underten = lambda x: x<10
        self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
        self.assertEqual(list(dropwhile(underten, [])), [])
        self.assertRaises(TypeError, dropwhile)
        self.assertRaises(TypeError, dropwhile, operator.pow)
        self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
        self.assertRaises(TypeError, dropwhile(10, [(4,5)]).next)
        self.assertRaises(ValueError, dropwhile(errfunc, [(4,5)]).next)

    def test_tee(self):
        n = 20
        def irange(n):
            for i in xrange(n):
                yield i

        a, b = tee([])        # test empty iterator
        self.assertEqual(list(a), [])
        self.assertEqual(list(b), [])

        a, b = tee(irange(n)) # test 100% interleaved
        self.assertEqual(zip(a,b), zip(range(n),range(n)))

        a, b = tee(irange(n)) # test 0% interleaved
        self.assertEqual(list(a), range(n))
        self.assertEqual(list(b), range(n))

        a, b = tee(irange(n)) # test dealloc of leading iterator
        for i in xrange(n // 2):
            self.assertEqual(a.next(), i)
        del a
        self.assertEqual(list(b), range(n))

        a, b = tee(irange(n)) # test dealloc of trailing iterator
        for i in xrange(n // 2):
            self.assertEqual(a.next(), i)
        del b
        self.assertEqual(list(a), range(n // 2, n))

        for j in xrange(5):   # test randomly interleaved
            order = [0]*n + [1]*n
            random.shuffle(order)
            lists = ([], [])
            its = tee(irange(n))
            for i in order:
                value = its[i].next()
                lists[i].append(value)
            self.assertEqual(lists[0], range(n))
            self.assertEqual(lists[1], range(n))

        # test argument format checking
        self.assertRaises(TypeError, tee)
        self.assertRaises(TypeError, tee, 3)
        self.assertRaises(TypeError, tee, [1,2], 'x')
        self.assertRaises(TypeError, tee, [1,2], 3, 'x')

        # tee object should be instantiable
        a, b = tee('abc')
        c = type(a)('def')
        self.assertEqual(list(c), list('def'))

        # test long-lagged and multi-way split
        a, b, c = tee(xrange(n), 3)
        for i in xrange(n // 2):
            self.assertEqual(a.next(), i)
        self.assertEqual(list(b), range(n))
        self.assertEqual([c.next(), c.next()], range(2))
        self.assertEqual(list(a), range(n // 2, n))
        self.assertEqual(list(c), range(2, n))

        # test values of n
        self.assertRaises(TypeError, tee, 'abc', 'invalid')
        self.assertRaises(ValueError, tee, [], -1)
        for n in xrange(5):
            result = tee('abc', n)
            self.assertEqual(type(result), tuple)
            self.assertEqual(len(result), n)
            self.assertEqual(map(list, result), [list('abc')]*n)

        # tee pass-through to copyable iterator
        a, b = tee('abc')
        c, d = tee(a)
        self.assert_(a is c)

        # test tee_new
        t1, t2 = tee('abc')
        tnew = type(t1)
        self.assertRaises(TypeError, tnew)
        self.assertRaises(TypeError, tnew, 10)
        t3 = tnew(t1)
        self.assert_(list(t1) == list(t2) == list(t3) == list('abc'))

        # Commented out until weakref support is implemented.
#        # test that tee objects are weak referencable
#        a, b = tee(xrange(10))
#        p = proxy(a)
#        self.assertEqual(getattr(p, '__class__'), type(b))
#        del a
#        self.assertRaises(ReferenceError, getattr, p, '__class__')

    def test_StopIteration(self):
        # Every itertools iterator must raise StopIteration (not something
        # else) once its inputs are exhausted or empty.
        self.assertRaises(StopIteration, izip().next)

        for f in (chain, cycle, izip, groupby):
            self.assertRaises(StopIteration, f([]).next)
            self.assertRaises(StopIteration, f(StopNow()).next)

        self.assertRaises(StopIteration, islice([], None).next)
        self.assertRaises(StopIteration, islice(StopNow(), None).next)

        p, q = tee([])
        self.assertRaises(StopIteration, p.next)
        self.assertRaises(StopIteration, q.next)
        p, q = tee(StopNow())
        self.assertRaises(StopIteration, p.next)
        self.assertRaises(StopIteration, q.next)

        self.assertRaises(StopIteration, repeat(None, 0).next)

        for f in (ifilter, ifilterfalse, imap, takewhile, dropwhile, starmap):
            self.assertRaises(StopIteration, f(lambda x:x, []).next)
            self.assertRaises(StopIteration, f(lambda x:x, StopNow()).next)
+
class TestGC(unittest.TestCase):
    """Check that itertools objects caught in reference cycles are
    collectable: each test builds a container -> iterator -> container
    cycle through makecycle()."""

    def makecycle(self, iterator, container):
        # Close the cycle: the container (already referenced by the
        # iterator's input) now also references the iterator.
        container.append(iterator)
        # Advance once -- presumably to make the iterator really grab its
        # underlying operands before the locals are dropped (confirm).
        iterator.next()
        del container, iterator

    def test_chain(self):
        a = []
        self.makecycle(chain(a), a)

    def test_cycle(self):
        a = []
        self.makecycle(cycle([a]*2), a)

    def test_dropwhile(self):
        a = []
        self.makecycle(dropwhile(bool, [0, a, a]), a)

    def test_groupby(self):
        a = []
        self.makecycle(groupby([a]*2, lambda x:x), a)

    def test_ifilter(self):
        a = []
        self.makecycle(ifilter(lambda x:True, [a]*2), a)

    def test_ifilterfalse(self):
        a = []
        self.makecycle(ifilterfalse(lambda x:False, a), a)

    def test_izip(self):
        a = []
        self.makecycle(izip([a]*2, [a]*3), a)

    def test_imap(self):
        a = []
        self.makecycle(imap(lambda x:x, [a]*2), a)

    def test_islice(self):
        a = []
        self.makecycle(islice([a]*2, None), a)

    def test_repeat(self):
        a = []
        self.makecycle(repeat(a), a)

    def test_starmap(self):
        a = []
        self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)

    def test_takewhile(self):
        a = []
        self.makecycle(takewhile(bool, [1, 0, a, a]), a)
+
def R(seqn):
    'Plain generator that simply re-yields every element of seqn.'
    for item in seqn:
        yield item
+
class G:
    'Sequence exposing only __getitem__ (legacy iteration protocol).'

    def __init__(self, seqn):
        # Keep the underlying sequence; indexing is delegated verbatim.
        self.seqn = seqn

    def __getitem__(self, i):
        return self.seqn[i]
+
+class I:
+    'Sequence using iterator protocol'
+    def __init__(self, seqn):
+        self.seqn = seqn
+        self.i = 0
+    def __iter__(self):
+        return self
+    def next(self):
+        if self.i >= len(self.seqn): raise StopIteration
+        v = self.seqn[self.i]
+        self.i += 1
+        return v
+
class Ig:
    'Iterable whose __iter__ is a generator over the stored sequence.'

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0  # unused; kept only for interface parity with class I

    def __iter__(self):
        for item in self.seqn:
            yield item
+
class X:
    'Has next() but neither __getitem__ nor __iter__: not an iterable.'

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0  # position of the next element to hand out

    def next(self):
        if self.i >= len(self.seqn):
            raise StopIteration
        value = self.seqn[self.i]
        self.i += 1
        return value
+
+class N:
+    'Iterator missing next()'
+    def __init__(self, seqn):
+        self.seqn = seqn
+        self.i = 0
+    def __iter__(self):
+        return self
+
+class E:
+    'Test propagation of exceptions'
+    def __init__(self, seqn):
+        self.seqn = seqn
+        self.i = 0
+    def __iter__(self):
+        return self
+    def next(self):
+        3 // 0
+
+class S:
+    'Test immediate stop'
+    def __init__(self, seqn):
+        pass
+    def __iter__(self):
+        return self
+    def next(self):
+        raise StopIteration
+
def L(seqn):
    'Wrap seqn in several tiers of iterators: G -> Ig -> R -> imap -> chain.'
    identity = lambda x: x
    layered = R(Ig(G(seqn)))
    return chain(imap(identity, layered))
+
+
class TestVariousIteratorArgs(unittest.TestCase):
    """Feed each itertools function every flavour of iterable (G, I, Ig,
    S, L, R defined above) and compare against the builtin equivalent;
    also check TypeError for non-iterables (X, N) and that errors raised
    by a broken iterator (E) propagate."""

    def test_chain(self):
        for s in ("123", "", range(10), ('do', 1.2), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(chain(g(s))), list(g(s)))
                self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
            self.assertRaises(TypeError, chain, X(s))
            self.assertRaises(TypeError, list, chain(N(s)))
            self.assertRaises(ZeroDivisionError, list, chain(E(s)))

    def test_cycle(self):
        for s in ("123", "", range(10), ('do', 1.2), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                tgtlen = len(s) * 3
                expected = list(g(s))*3
                actual = list(islice(cycle(g(s)), tgtlen))
                self.assertEqual(actual, expected)
            self.assertRaises(TypeError, cycle, X(s))
            self.assertRaises(TypeError, list, cycle(N(s)))
            self.assertRaises(ZeroDivisionError, list, cycle(E(s)))

    def test_groupby(self):
        for s in (range(10), range(0), range(10), (7,11), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
            self.assertRaises(TypeError, groupby, X(s))
            self.assertRaises(TypeError, list, groupby(N(s)))
            self.assertRaises(ZeroDivisionError, list, groupby(E(s)))

    def test_ifilter(self):
        for s in (range(10), range(0), range(10), (7,11), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(ifilter(isEven, g(s))), filter(isEven, g(s)))
            self.assertRaises(TypeError, ifilter, isEven, X(s))
            self.assertRaises(TypeError, list, ifilter(isEven, N(s)))
            self.assertRaises(ZeroDivisionError, list, ifilter(isEven, E(s)))

    def test_ifilterfalse(self):
        for s in (range(10), range(0), range(10), (7,11), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(ifilterfalse(isEven, g(s))), filter(isOdd, g(s)))
            self.assertRaises(TypeError, ifilterfalse, isEven, X(s))
            self.assertRaises(TypeError, list, ifilterfalse(isEven, N(s)))
            self.assertRaises(ZeroDivisionError, list, ifilterfalse(isEven, E(s)))

    def test_izip(self):
        for s in ("123", "", range(10), ('do', 1.2), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(izip(g(s))), zip(g(s)))
                self.assertEqual(list(izip(g(s), g(s))), zip(g(s), g(s)))
            self.assertRaises(TypeError, izip, X(s))
            self.assertRaises(TypeError, list, izip(N(s)))
            self.assertRaises(ZeroDivisionError, list, izip(E(s)))

    def test_imap(self):
        for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(imap(onearg, g(s))), map(onearg, g(s)))
                self.assertEqual(list(imap(operator.pow, g(s), g(s))), map(operator.pow, g(s), g(s)))
            self.assertRaises(TypeError, imap, onearg, X(s))
            self.assertRaises(TypeError, list, imap(onearg, N(s)))
            self.assertRaises(ZeroDivisionError, list, imap(onearg, E(s)))

    def test_islice(self):
        for s in ("12345", "", range(10), ('do', 1.2), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
            self.assertRaises(TypeError, islice, X(s), 10)
            self.assertRaises(TypeError, list, islice(N(s), 10))
            self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))

    def test_starmap(self):
        for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
            for g in (G, I, Ig, S, L, R):
                ss = zip(s, s)
                self.assertEqual(list(starmap(operator.pow, g(ss))), map(operator.pow, g(s), g(s)))
            # Note: ss deliberately leaks out of the inner loop above.
            self.assertRaises(TypeError, starmap, operator.pow, X(ss))
            self.assertRaises(TypeError, list, starmap(operator.pow, N(ss)))
            self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))

    def test_takewhile(self):
        for s in (range(10), range(0), range(10), (7,11), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                tgt = []
                for elem in g(s):
                    if not isEven(elem): break
                    tgt.append(elem)
                self.assertEqual(list(takewhile(isEven, g(s))), tgt)
            self.assertRaises(TypeError, takewhile, isEven, X(s))
            self.assertRaises(TypeError, list, takewhile(isEven, N(s)))
            self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))

    def test_dropwhile(self):
        for s in (range(10), range(0), range(10), (7,11), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                tgt = []
                for elem in g(s):
                    if not tgt and isOdd(elem): continue
                    tgt.append(elem)
                self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
            self.assertRaises(TypeError, dropwhile, isOdd, X(s))
            self.assertRaises(TypeError, list, dropwhile(isOdd, N(s)))
            self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))

    def test_tee(self):
        for s in ("123", "", range(10), ('do', 1.2), xrange(2000,2030,5)):
            for g in (G, I, Ig, S, L, R):
                it1, it2 = tee(g(s))
                self.assertEqual(list(it1), list(g(s)))
                self.assertEqual(list(it2), list(g(s)))
            self.assertRaises(TypeError, tee, X(s))
            self.assertRaises(TypeError, list, tee(N(s))[0])
            self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
+
class LengthTransparency(unittest.TestCase):
    """repeat() with an explicit count must report a length via the
    __length_hint__-aware len() from test.test_iterlen; an unbounded
    repeat() must not."""

    def test_repeat(self):
        from test.test_iterlen import len
        self.assertEqual(len(repeat(None, 50)), 50)
        self.assertRaises(TypeError, len, repeat(None))
+
class RegressionTests(unittest.TestCase):
    """Regression tests for specific SourceForge bug reports."""

    def test_sf_793826(self):
        # Fix Armin Rigo's successful efforts to wreak havoc

        def mutatingtuple(tuple1, f, tuple2):
            # this builds a tuple t which is a copy of tuple1,
            # then calls f(t), then mutates t to be equal to tuple2
            # (needs len(tuple1) == len(tuple2)).
            def g(value, first=[1]):
                # NOTE: the mutable default is deliberate -- it makes g
                # call f() exactly once, on the first invocation only.
                if first:
                    del first[:]
                    f(z.next())
                return value
            items = list(tuple2)
            items[1:1] = list(tuple1)
            gen = imap(g, items)
            z = izip(*[gen]*len(tuple1))
            z.next()

        def f(t):
            global T
            T = t
            first[:] = list(T)

        first = []
        mutatingtuple((1,2,3), f, (4,5,6))
        second = list(T)
        self.assertEqual(first, second)


    def test_sf_950057(self):
        # Make sure that chain() and cycle() catch exceptions immediately
        # rather than when shifting between input sources

        def gen1():
            hist.append(0)
            yield 1
            hist.append(1)
            raise AssertionError
            hist.append(2)

        def gen2(x):
            hist.append(3)
            yield 2
            hist.append(4)
            if x:
                raise StopIteration

        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
        self.assertEqual(hist, [0,1])

        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
        self.assertEqual(hist, [0,1])

        hist = []
        self.assertRaises(AssertionError, list, cycle(gen1()))
        self.assertEqual(hist, [0,1])
+
class SubclassWithKwargsTest(unittest.TestCase):
    """Subclasses of the itertools types must be constructible with
    keyword arguments: any TypeError raised must be about the argument
    count, never 'does not take keyword arguments'."""

    def test_keywords_in_subclass(self):
        # count is not subclassable...
        for cls in (repeat, izip, ifilter, ifilterfalse, chain, imap,
                    starmap, islice, takewhile, dropwhile, cycle):
            class Subclass(cls):
                def __init__(self, newarg=None, *args):
                    cls.__init__(self, *args)
            try:
                Subclass(newarg=1)
            except TypeError, err:
                # we expect type errors because of wrong argument count
                self.failIf("does not take keyword arguments" in err.args[0])
+
+
# Doctests copied from the libitertools.tex examples in the library
# reference; they are executed via the __test__ hook below (see
# test_main's run_doctest call).  The string content is runtime test
# data and must not be edited casually.
libreftest = """ Doctest for examples in the library reference: libitertools.tex


>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in izip(count(1200), amounts):
...     print 'Check %d is for $%.2f' % (checknum, amount)
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14

>>> import operator
>>> for cube in imap(operator.pow, xrange(1,4), repeat(3)):
...    print cube
...
1
8
27

>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
...    print name.title()
...
Alex
Laura
Martin
Walter
Samuele

>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.iteritems()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
...     print k, map(itemgetter(0), g)
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']

# Find runs of consecutive numbers using groupby.  The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# same group.
>>> data = [ 1,  4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda (i,x):i-x):
...     print map(operator.itemgetter(1), g)
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]

>>> def take(n, seq):
...     return list(islice(seq, n))

>>> def enumerate(iterable):
...     return izip(count(), iterable)

>>> def tabulate(function):
...     "Return function(0), function(1), ..."
...     return imap(function, count())

>>> def iteritems(mapping):
...     return izip(mapping.iterkeys(), mapping.itervalues())

>>> def nth(iterable, n):
...     "Returns the nth item"
...     return list(islice(iterable, n, n+1))

>>> def all(seq, pred=None):
...     "Returns True if pred(x) is true for every element in the iterable"
...     for elem in ifilterfalse(pred, seq):
...         return False
...     return True

>>> def any(seq, pred=None):
...     "Returns True if pred(x) is true for at least one element in the iterable"
...     for elem in ifilter(pred, seq):
...         return True
...     return False

>>> def no(seq, pred=None):
...     "Returns True if pred(x) is false for every element in the iterable"
...     for elem in ifilter(pred, seq):
...         return False
...     return True

>>> def quantify(seq, pred=None):
...     "Count how many times the predicate is true in the sequence"
...     return sum(imap(pred, seq))

>>> def padnone(seq):
...     "Returns the sequence elements and then returns None indefinitely"
...     return chain(seq, repeat(None))

>>> def ncycles(seq, n):
...     "Returns the sequence elements n times"
...     return chain(*repeat(seq, n))

>>> def dotproduct(vec1, vec2):
...     return sum(imap(operator.mul, vec1, vec2))

>>> def flatten(listOfLists):
...     return list(chain(*listOfLists))

>>> def repeatfunc(func, times=None, *args):
...     "Repeat calls to func with specified arguments."
...     "   Example:  repeatfunc(random.random)"
...     if times is None:
...         return starmap(func, repeat(args))
...     else:
...         return starmap(func, repeat(args, times))

>>> def pairwise(iterable):
...     "s -> (s0,s1), (s1,s2), (s2, s3), ..."
...     a, b = tee(iterable)
...     try:
...         b.next()
...     except StopIteration:
...         pass
...     return izip(a, b)

This is not part of the examples but it tests to make sure the definitions
perform as purported.

>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]

>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]

>>> nth('abcde', 3)
['d']

>>> all([2, 4, 6, 8], lambda x: x%2==0)
True

>>> all([2, 3, 6, 8], lambda x: x%2==0)
False

>>> any([2, 4, 6, 8], lambda x: x%2==0)
True

>>> any([1, 3, 5, 9], lambda x: x%2==0,)
False

>>> no([1, 3, 5, 9], lambda x: x%2==0)
True

>>> no([1, 2, 5, 9], lambda x: x%2==0)
False

>>> quantify(xrange(99), lambda x: x%2==0)
50

>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]

>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]

>>> import random
>>> take(5, imap(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]

>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]

>>> list(pairwise([]))
[]

>>> list(pairwise('a'))
[]

>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]

>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']

>>> dotproduct([1,2,3], [4,5,6])
32

"""

# Expose the doctests above to test_support.run_doctest().
__test__ = {'libreftest' : libreftest}
+
def test_main(verbose=None):
    """regrtest entry point: run every TestCase class, optionally check
    for reference leaks (CPython debug builds only), then run the
    libreftest doctests."""
    test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
                    RegressionTests, LengthTransparency,
                    SubclassWithKwargsTest)
    test_support.run_unittest(*test_classes)

    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            # Repeat the suite; a steadily growing count indicates a leak.
            test_support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts

    # doctest the examples in the library reference
    test_support.run_doctest(sys.modules[__name__], verbose)

# Allow running the test file directly from the command line.
if __name__ == "__main__":
    test_main(verbose=True)

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_marshal.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_marshal.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,221 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+
+from test import test_support
+import marshal
+import sys
+import unittest
+import os
+
def dump_and_load(obj):
    """Round-trip obj through marshal via a real file and return the result.

    Writes obj to test_support.TESTFN with marshal.dump(), reads it back
    with marshal.load(), and returns the loaded value.  The test file is
    left on disk; callers are expected to os.unlink() it when done.
    """
    # try/finally so the file handle is released even if marshal raises
    # (e.g. on an unmarshallable object); the original leaked it.
    # open() instead of the deprecated file() builtin.
    f = open(test_support.TESTFN, "wb")
    try:
        marshal.dump(obj, f)
    finally:
        f.close()
    f = open(test_support.TESTFN, "rb")
    try:
        got = marshal.load(f)
    finally:
        f.close()
    return got
+
class IntTestCase(unittest.TestCase):
    def test_ints(self):
        # Test the full range of Python ints.
        # Walk sys.maxint down to 0 by halving, round-tripping +/-n both
        # in memory (dumps/loads) and through a real file (dump_and_load).
        n = sys.maxint
        while n:
            for expected in (-n, n):
                s = marshal.dumps(expected)
                got = marshal.loads(s)
                self.assertEqual(expected, got)
                got = dump_and_load(expected)
                self.assertEqual(expected, got)
            n = n >> 1
        os.unlink(test_support.TESTFN)

    def test_int64(self):
        # Simulate int marshaling on a 64-bit box.  This is most interesting if
        # we're running the test on a 32-bit box, of course.

        def to_little_endian_string(value, nbytes):
            # Serialize value as its nbytes low-order bytes, little-endian.
            bytes = []
            for i in range(nbytes):
                bytes.append(chr(value & 0xff))
                value >>= 8
            return ''.join(bytes)

        maxint64 = (1L << 63) - 1
        minint64 = -maxint64-1

        for base in maxint64, minint64, -maxint64, -(minint64 >> 1):
            while base:
                # 'I' is the marshal type code for a 64-bit int record.
                s = 'I' + to_little_endian_string(base, 8)
                got = marshal.loads(s)
                self.assertEqual(base, got)
                if base == -1:  # a fixed-point for shifting right 1
                    base = 0
                else:
                    base >>= 1

    def test_bool(self):
        # Booleans must round-trip preserving type, not just truth value.
        for b in (True, False):
            new = marshal.loads(marshal.dumps(b))
            self.assertEqual(b, new)
            self.assertEqual(type(b), type(new))
            new = dump_and_load(b)
            self.assertEqual(b, new)
            self.assertEqual(type(b), type(new))
+
class FloatTestCase(unittest.TestCase):
    def test_floats(self):
        # Test a few floats
        small = 1e-25
        # Huge magnitudes: shrink from ~maxint*3.7e250 down toward `small`.
        n = sys.maxint * 3.7e250
        while n > small:
            for expected in (-n, n):
                f = float(expected)
                s = marshal.dumps(f)
                got = marshal.loads(s)
                self.assertEqual(f, got)
                got = dump_and_load(f)
                self.assertEqual(f, got)
            n /= 123.4567

        # Zero must round-trip in both the current format and the old one.
        f = 0.0
        s = marshal.dumps(f)
        got = marshal.loads(s)
        self.assertEqual(f, got)
        # and with version <= 1 (floats marshalled differently then)
        s = marshal.dumps(f, 1)
        got = marshal.loads(s)
        self.assertEqual(f, got)

        # Tiny magnitudes: grow from ~3.4e-231 up toward `small`.
        n = sys.maxint * 3.7e-250
        while n < small:
            for expected in (-n, n):
                f = float(expected)
                s = marshal.dumps(f)
                got = marshal.loads(s)
                self.assertEqual(f, got)
                got = dump_and_load(f)
                self.assertEqual(f, got)
            n *= 123.4567
        os.unlink(test_support.TESTFN)
+
class StringTestCase(unittest.TestCase):
    # Each test round-trips empty, non-ASCII (iso-8859-1 per the coding
    # cookie), short and long values, checking both value and exact type.
    def test_unicode(self):
        for s in [u"", u"Andrè Previn", u"abc", u" "*10000]:
            new = marshal.loads(marshal.dumps(s))
            self.assertEqual(s, new)
            self.assertEqual(type(s), type(new))
            new = dump_and_load(s)
            self.assertEqual(s, new)
            self.assertEqual(type(s), type(new))
        os.unlink(test_support.TESTFN)

    def test_string(self):
        for s in ["", "Andrè Previn", "abc", " "*10000]:
            new = marshal.loads(marshal.dumps(s))
            self.assertEqual(s, new)
            self.assertEqual(type(s), type(new))
            new = dump_and_load(s)
            self.assertEqual(s, new)
            self.assertEqual(type(s), type(new))
        os.unlink(test_support.TESTFN)

    def test_buffer(self):
        # Buffers marshal as plain strings, so only the value is compared.
        for s in ["", "Andrè Previn", "abc", " "*10000]:
            b = buffer(s)
            new = marshal.loads(marshal.dumps(b))
            self.assertEqual(s, new)
            new = dump_and_load(b)
            self.assertEqual(s, new)
        os.unlink(test_support.TESTFN)
+
class ExceptionTestCase(unittest.TestCase):
    """Marshal support for the StopIteration type (a Py2 marshal special case)."""
    def test_exceptions(self):
        roundtripped = marshal.loads(marshal.dumps(StopIteration))
        self.assertEqual(StopIteration, roundtripped)
+
class CodeTestCase(unittest.TestCase):
    """Marshal round-trip of code objects."""
    def test_code(self):
        # Any handy code object will do; borrow a sibling test method's.
        original = ExceptionTestCase.test_exceptions.func_code
        self.assertEqual(original, marshal.loads(marshal.dumps(original)))
+
class ContainerTestCase(unittest.TestCase):
    # One representative value of each marshallable scalar kind; the
    # tests below recombine them as a dict, a list of items, a tuple of
    # keys, and (frozen)sets of keys.
    # NOTE(review): 'foo at bar.baz.spam' looks like mailing-list address
    # munging of 'foo@bar.baz.spam' -- confirm against the repository copy.
    d = {'astring': 'foo at bar.baz.spam',
         'afloat': 7283.43,
         'anint': 2**20,
         'ashortlong': 2L,
         'alist': ['.zyx.41'],
         'atuple': ('.zyx.41',)*10,
         'aboolean': False,
         'aunicode': u"Andrè Previn"
         }
    def test_dict(self):
        new = marshal.loads(marshal.dumps(self.d))
        self.assertEqual(self.d, new)
        new = dump_and_load(self.d)
        self.assertEqual(self.d, new)
        os.unlink(test_support.TESTFN)

    def test_list(self):
        lst = self.d.items()
        new = marshal.loads(marshal.dumps(lst))
        self.assertEqual(lst, new)
        new = dump_and_load(lst)
        self.assertEqual(lst, new)
        os.unlink(test_support.TESTFN)

    def test_tuple(self):
        t = tuple(self.d.keys())
        new = marshal.loads(marshal.dumps(t))
        self.assertEqual(t, new)
        new = dump_and_load(t)
        self.assertEqual(t, new)
        os.unlink(test_support.TESTFN)

    def test_sets(self):
        for constructor in (set, frozenset):
            t = constructor(self.d.keys())
            new = marshal.loads(marshal.dumps(t))
            self.assertEqual(t, new)
            self.assert_(isinstance(new, constructor))
            # Must be a fresh object, not the original re-surfaced.
            self.assertNotEqual(id(t), id(new))
            new = dump_and_load(t)
            self.assertEqual(t, new)
            os.unlink(test_support.TESTFN)
+
+class BugsTestCase(unittest.TestCase):
+    def test_bug_5888452(self):
+        # Simple-minded check for SF 588452: Debug build crashes
+        marshal.dumps([128] * 1000)
+
+    def test_patch_873224(self):
+        self.assertRaises(Exception, marshal.loads, '0')
+        self.assertRaises(Exception, marshal.loads, 'f')
+        self.assertRaises(Exception, marshal.loads, marshal.dumps(5L)[:-1])
+
+    def test_version_argument(self):
+        # Python 2.4.0 crashes for any call to marshal.dumps(x, y)
+        self.assertEquals(marshal.loads(marshal.dumps(5, 0)), 5)
+        self.assertEquals(marshal.loads(marshal.dumps(5, 1)), 5)
+
+    def test_fuzz(self):
+        # simple test that it's at least not *totally* trivial to
+        # crash from bad marshal data
+        for c in [chr(i) for i in range(256)]:
+            try:
+                marshal.loads(c)
+            except Exception:
+                pass
+
def test_main():
    """Entry point: run every marshal TestCase defined in this module."""
    cases = [IntTestCase,
             FloatTestCase,
             StringTestCase,
             CodeTestCase,
             ContainerTestCase,
             ExceptionTestCase,
             BugsTestCase]
    test_support.run_unittest(*cases)

if __name__ == "__main__":
    test_main()

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_mmap.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_mmap.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,397 @@
+from test.test_support import verify, vereq, TESTFN
+import mmap
+import os, re
+
+PAGESIZE = mmap.PAGESIZE
+
def test_both():
    "Test mmap module on Unix systems and Windows"

    # Create a file to be mmap'ed.
    if os.path.exists(TESTFN):
        os.unlink(TESTFN)
    f = open(TESTFN, 'w+')

    try:    # unlink TESTFN no matter what
        # Write 2 pages worth of data to the file
        f.write('\0'* PAGESIZE)
        f.write('foo')
        f.write('\0'* (PAGESIZE-3) )
        f.flush()
        m = mmap.mmap(f.fileno(), 2 * PAGESIZE)
        f.close()

        # Simple sanity checks

        print type(m)  # SF bug 128713:  segfaulted on Linux
        print '  Position of foo:', m.find('foo') / float(PAGESIZE), 'pages'
        vereq(m.find('foo'), PAGESIZE)

        print '  Length of file:', len(m) / float(PAGESIZE), 'pages'
        vereq(len(m), 2*PAGESIZE)

        print '  Contents of byte 0:', repr(m[0])
        vereq(m[0], '\0')
        print '  Contents of first 3 bytes:', repr(m[0:3])
        vereq(m[0:3], '\0\0\0')

        # Modify the file's content through item and slice assignment.
        print "\n  Modifying file's content..."
        m[0] = '3'
        m[PAGESIZE +3: PAGESIZE +3+3] = 'bar'

        # Check that the modification worked
        print '  Contents of byte 0:', repr(m[0])
        vereq(m[0], '3')
        print '  Contents of first 3 bytes:', repr(m[0:3])
        vereq(m[0:3], '3\0\0')
        print '  Contents of second page:',  repr(m[PAGESIZE-1 : PAGESIZE + 7])
        vereq(m[PAGESIZE-1 : PAGESIZE + 7], '\0foobar\0')

        m.flush()

        # Test doing a regular expression match in an mmap'ed file
        match = re.search('[A-Za-z]+', m)
        if match is None:
            print '  ERROR: regex match on mmap failed!'
        else:
            start, end = match.span(0)
            length = end - start

            print '  Regex match on mmap (page start, length of match):',
            print start / float(PAGESIZE), length

            vereq(start, PAGESIZE)
            vereq(end, PAGESIZE + 6)

        # test seeking around (try to overflow the seek implementation)
        m.seek(0,0)
        print '  Seek to zeroth byte'
        vereq(m.tell(), 0)
        m.seek(42,1)
        print '  Seek to 42nd byte'
        vereq(m.tell(), 42)
        m.seek(0,2)
        print '  Seek to last byte'
        vereq(m.tell(), len(m))

        print '  Try to seek to negative position...'
        try:
            m.seek(-1)
        except ValueError:
            pass
        else:
            verify(0, 'expected a ValueError but did not get it')

        print '  Try to seek beyond end of mmap...'
        try:
            m.seek(1,2)
        except ValueError:
            pass
        else:
            verify(0, 'expected a ValueError but did not get it')

        print '  Try to seek to negative position...'
        try:
            m.seek(-len(m)-1,2)
        except ValueError:
            pass
        else:
            verify(0, 'expected a ValueError but did not get it')

        # Try resizing map
        print '  Attempting resize()'
        try:
            m.resize(512)
        except SystemError:
            # resize() not supported
            # No messages are printed, since the output of this test suite
            # would then be different across platforms.
            pass
        else:
            # resize() is supported
            verify(len(m) == 512,
                    "len(m) is %d, but expecting 512" % (len(m),) )
            # Check that we can no longer seek beyond the new size.
            try:
                m.seek(513,0)
            except ValueError:
                pass
            else:
                verify(0, 'Could seek beyond the new size')

            # Check that the underlying file is truncated too
            # (bug #728515)
            f = open(TESTFN)
            f.seek(0, 2)
            verify(f.tell() == 512, 'Underlying file not truncated')
            f.close()
            verify(m.size() == 512, 'New size not reflected in file')

        m.close()

    finally:
        try:
            f.close()
        except OSError:
            pass
        try:
            os.unlink(TESTFN)
        except OSError:
            pass

    # Test for "access" keyword parameter
    try:
        mapsize = 10
        print "  Creating", mapsize, "byte test data file."
        f = open(TESTFN, "wb")
        f.write("a"*mapsize)
        f.close()
        print "  Opening mmap with access=ACCESS_READ"
        f = open(TESTFN, "rb")
        m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ)
        verify(m[:] == 'a'*mapsize, "Readonly memory map data incorrect.")

        # Every mutation attempt on a read-only map must raise TypeError.
        print "  Ensuring that readonly mmap can't be slice assigned."
        try:
            m[:] = 'b'*mapsize
        except TypeError:
            pass
        else:
            verify(0, "Able to write to readonly memory map")

        print "  Ensuring that readonly mmap can't be item assigned."
        try:
            m[0] = 'b'
        except TypeError:
            pass
        else:
            verify(0, "Able to write to readonly memory map")

        print "  Ensuring that readonly mmap can't be write() to."
        try:
            m.seek(0,0)
            m.write('abc')
        except TypeError:
            pass
        else:
            verify(0, "Able to write to readonly memory map")

        print "  Ensuring that readonly mmap can't be write_byte() to."
        try:
            m.seek(0,0)
            m.write_byte('d')
        except TypeError:
            pass
        else:
            verify(0, "Able to write to readonly memory map")

        print "  Ensuring that readonly mmap can't be resized."
        try:
            m.resize(2*mapsize)
        except SystemError:   # resize is not universally supported
            pass
        except TypeError:
            pass
        else:
            verify(0, "Able to resize readonly memory map")
        del m, f
        verify(open(TESTFN, "rb").read() == 'a'*mapsize,
               "Readonly memory map data file was modified")

        print "  Opening mmap with size too big"
        import sys
        f = open(TESTFN, "r+b")
        try:
            m = mmap.mmap(f.fileno(), mapsize+1)
        except ValueError:
            # we do not expect a ValueError on Windows
            # CAUTION:  This also changes the size of the file on disk, and
            # later tests assume that the length hasn't changed.  We need to
            # repair that.
            if sys.platform.startswith('win'):
                verify(0, "Opening mmap with size+1 should work on Windows.")
        else:
            # we expect a ValueError on Unix, but not on Windows
            if not sys.platform.startswith('win'):
                verify(0, "Opening mmap with size+1 should raise ValueError.")
            m.close()
        f.close()
        if sys.platform.startswith('win'):
            # Repair damage from the resizing test.
            f = open(TESTFN, 'r+b')
            f.truncate(mapsize)
            f.close()

        print "  Opening mmap with access=ACCESS_WRITE"
        f = open(TESTFN, "r+b")
        m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_WRITE)
        print "  Modifying write-through memory map."
        m[:] = 'c'*mapsize
        verify(m[:] == 'c'*mapsize,
               "Write-through memory map memory not updated properly.")
        m.flush()
        m.close()
        f.close()
        f = open(TESTFN, 'rb')
        stuff = f.read()
        f.close()
        verify(stuff == 'c'*mapsize,
               "Write-through memory map data file not updated properly.")

        print "  Opening mmap with access=ACCESS_COPY"
        f = open(TESTFN, "r+b")
        m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_COPY)
        print "  Modifying copy-on-write memory map."
        m[:] = 'd'*mapsize
        verify(m[:] == 'd' * mapsize,
               "Copy-on-write memory map data not written correctly.")
        m.flush()
        # ACCESS_COPY writes must stay private to the mapping.
        verify(open(TESTFN, "rb").read() == 'c'*mapsize,
               "Copy-on-write test data file should not be modified.")
        try:
            print "  Ensuring copy-on-write maps cannot be resized."
            m.resize(2*mapsize)
        except TypeError:
            pass
        else:
            verify(0, "Copy-on-write mmap resize did not raise exception.")
        del m, f
        try:
            print "  Ensuring invalid access parameter raises exception."
            f = open(TESTFN, "r+b")
            m = mmap.mmap(f.fileno(), mapsize, access=4)
        except ValueError:
            pass
        else:
            verify(0, "Invalid access code should have raised exception.")

        if os.name == "posix":
            # Try incompatible flags, prot and access parameters.
            f = open(TESTFN, "r+b")
            try:
                m = mmap.mmap(f.fileno(), mapsize, flags=mmap.MAP_PRIVATE,
                              prot=mmap.PROT_READ, access=mmap.ACCESS_WRITE)
            except ValueError:
                pass
            else:
                verify(0, "Incompatible parameters should raise ValueError.")
            f.close()
    finally:
        try:
            os.unlink(TESTFN)
        except OSError:
            pass

    print '  Try opening a bad file descriptor...'
    try:
        mmap.mmap(-2, 4096)
    except mmap.error:
        pass
    else:
        verify(0, 'expected a mmap.error but did not get it')

    # Do a tougher .find() test.  SF bug 515943 pointed out that, in 2.2,
    # searching for data with embedded \0 bytes didn't work.
    f = open(TESTFN, 'w+')

    try:    # unlink TESTFN no matter what
        data = 'aabaac\x00deef\x00\x00aa\x00'
        n = len(data)
        f.write(data)
        f.flush()
        m = mmap.mmap(f.fileno(), n)
        f.close()

        # Exhaustively compare mmap.find against str.find on every slice.
        for start in range(n+1):
            for finish in range(start, n+1):
                slice = data[start : finish]
                vereq(m.find(slice), data.find(slice))
                vereq(m.find(slice + 'x'), -1)
        m.close()

    finally:
        os.unlink(TESTFN)

    # make sure a double close doesn't crash on Solaris (Bug# 665913)
    f = open(TESTFN, 'w+')

    try:    # unlink TESTFN no matter what
        f.write(2**16 * 'a') # Arbitrary character
        f.close()

        f = open(TESTFN)
        mf = mmap.mmap(f.fileno(), 2**16, access=mmap.ACCESS_READ)
        mf.close()
        mf.close()
        f.close()

    finally:
        os.unlink(TESTFN)

    # test mapping of entire file by passing 0 for map length
    if hasattr(os, "stat"):
        print "  Ensuring that passing 0 as map length sets map size to current file size."
        f = open(TESTFN, "w+")

        try:
            f.write(2**16 * 'm') # Arbitrary character
            f.close()

            f = open(TESTFN, "rb+")
            mf = mmap.mmap(f.fileno(), 0)
            verify(len(mf) == 2**16, "Map size should equal file size.")
            vereq(mf.read(2**16), 2**16 * "m")
            mf.close()
            f.close()

        finally:
            os.unlink(TESTFN)

    # test mapping of entire file by passing 0 for map length
    # NOTE(review): this block duplicates the previous one verbatim;
    # kept as-is to mirror upstream, but the second run is redundant.
    if hasattr(os, "stat"):
        print "  Ensuring that passing 0 as map length sets map size to current file size."
        f = open(TESTFN, "w+")
        try:
            f.write(2**16 * 'm') # Arbitrary character
            f.close()

            f = open(TESTFN, "rb+")
            mf = mmap.mmap(f.fileno(), 0)
            verify(len(mf) == 2**16, "Map size should equal file size.")
            vereq(mf.read(2**16), 2**16 * "m")
            mf.close()
            f.close()

        finally:
            os.unlink(TESTFN)

    # make sure move() works everywhere (64-bit format problem earlier)
    f = open(TESTFN, 'w+')

    try:    # unlink TESTFN no matter what
        f.write("ABCDEabcde") # Arbitrary character
        f.flush()

        mf = mmap.mmap(f.fileno(), 10)
        mf.move(5, 0, 5)
        verify(mf[:] == "ABCDEABCDE", "Map move should have duplicated front 5")
        mf.close()
        f.close()

    finally:
        os.unlink(TESTFN)
+
+def test_anon():
+    print "  anonymous mmap.mmap(-1, PAGESIZE)..."
+    m = mmap.mmap(-1, PAGESIZE)
+    for x in xrange(PAGESIZE):
+        verify(m[x] == '\0', "anonymously mmap'ed contents should be zero")
+
+    for x in xrange(PAGESIZE):
+        m[x] = ch = chr(x & 255)
+        vereq(m[x], ch)
+
# Old-style test driver: the suite runs on import, like the other
# pre-unittest tests in this directory.
test_both()
test_anon()
print ' Test passed'

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_module.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_module.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,48 @@
# Test the module type

from test.test_support import verify, vereq, verbose, TestFailed

import sys
module = type(sys)  # the module type isn't otherwise directly exposed

# An uninitialized module has no __dict__ or __name__, and __doc__
# matches the type's docstring (this check was modified for PyPy;
# upstream compares against None).
foo = module.__new__(module)
verify(not foo.__dict__)
try:
    s = foo.__name__
except AttributeError:
    pass
else:
    raise TestFailed, "__name__ = %s" % repr(s)
vereq(foo.__doc__, module.__doc__)

# Regularly initialized module, no docstring
foo = module("foo")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, None)
vereq(foo.__dict__, {"__name__": "foo", "__doc__": None})

# ASCII docstring
foo = module("foo", "foodoc")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, "foodoc")
vereq(foo.__dict__, {"__name__": "foo", "__doc__": "foodoc"})

# Unicode docstring
foo = module("foo", u"foodoc\u1234")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, u"foodoc\u1234")
vereq(foo.__dict__, {"__name__": "foo", "__doc__": u"foodoc\u1234"})

# Reinitialization should not replace the __dict__
# (existing attributes survive, and the dict object stays identical)
foo.bar = 42
d = foo.__dict__
foo.__init__("foo", "foodoc")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, "foodoc")
vereq(foo.bar, 42)
vereq(foo.__dict__, {"__name__": "foo", "__doc__": "foodoc", "bar": 42})
verify(foo.__dict__ is d)

if verbose:
    print "All OK"

Added: pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_mutants.py
==============================================================================
--- (empty file)
+++ pypy/branch/2.5-features/lib-python/modified-2.5.1/test/test_mutants.py	Mon Aug 18 01:37:16 2008
@@ -0,0 +1,292 @@
+from test.test_support import verbose, TESTFN
+import random
+import os
+
+# From SF bug #422121:  Insecurities in dict comparison.
+
+# Safety of code doing comparisons has been an historical Python weak spot.
+# The problem is that comparison of structures written in C *naturally*
+# wants to hold on to things like the size of the container, or "the
+# biggest" containee so far, across a traversal of the container; but
+# code to do containee comparisons can call back into Python and mutate
+# the container in arbitrary ways while the C loop is in midstream.  If the
+# C code isn't extremely paranoid about digging things out of memory on
+# each trip, and artificially boosting refcounts for the duration, anything
+# from infinite loops to OS crashes can result (yes, I use Windows <wink>).
+#
+# The other problem is that code designed to provoke a weakness is usually
+# white-box code, and so catches only the particular vulnerabilities the
+# author knew to protect against.  For example, Python's list.sort() code
+# went thru many iterations as one "new" vulnerability after another was
+# discovered.
+#
+# So the dict comparison test here uses a black-box approach instead,
+# generating dicts of various sizes at random, and performing random
+# mutations on them at random times.  This proved very effective,
+# triggering at least six distinct failure modes the first 20 times I
+# ran it.  Indeed, at the start, the driver never got beyond 6 iterations
+# before the test died.
+
# The dicts are global to make it easy to mutate them from within functions.
dict1 = {}
dict2 = {}

# The current set of keys in dict1 and dict2.  These are materialized as
# lists to make it easy to pick a dict key at random.
dict1keys = []
dict2keys = []

# Global flag telling maybe_mutate() whether to *consider* mutating.
mutate = 0

# If global mutate is true, consider mutating a dict.  May or may not
# mutate a dict even if mutate is true.  If it does decide to mutate a
# dict, it picks one of {dict1, dict2} at random, and deletes a random
# entry from it; or, more rarely, adds a random element.
+
def maybe_mutate():
    """Possibly mutate dict1 or dict2; invoked from Horrid comparisons.

    Every random-module call is kept in exactly the original order, so
    the global RNG sequence (and hence the test's behavior) is unchanged.
    """
    global mutate
    # Only act when enabled, and then only half the time.
    if not mutate:
        return
    if random.random() < 0.5:
        return

    if random.random() < 0.5:
        victim, victim_keys = dict1, dict1keys
    else:
        victim, victim_keys = dict2, dict2keys

    if random.random() < 0.2:
        # Rarely: insert a brand-new key.  Mutation is disabled while we
        # do it, so the membership probes below can't recurse into us.
        mutate = 0
        while True:
            candidate = Horrid(random.randrange(100))
            if candidate not in victim:
                break
        victim[candidate] = Horrid(random.randrange(100))
        victim_keys.append(candidate)
        mutate = 1
    elif victim_keys:
        # Usually: delete one randomly chosen existing key.
        mutate = 0
        idx = random.randrange(len(victim_keys))
        doomed = victim_keys[idx]
        del victim[doomed]
        del victim_keys[idx]
        mutate = 1
+
# A horrid class that triggers random mutations of dict1 and dict2 when
# instances are compared.

class Horrid:
    def __init__(self, i):
        # Comparison outcomes are determined by the value of i.
        self.i = i

        # An artificial hashcode is selected at random so that we don't
        # have any systematic relationship between comparison outcomes
        # (based on self.i and other.i) and relative position within the
        # hash vector (based on hashcode).  The draw is kept even though
        # __hash__ below no longer uses it, so the global RNG sequence
        # consumed by this module is unchanged.
        self.hashcode = random.randrange(1000000000)

    def __hash__(self):
        # PyPy modification: force every instance into the same hash
        # bucket so key comparisons (and thus mutation callbacks) happen
        # as often as possible.  The unreachable upstream
        # "return self.hashcode" that followed has been removed.
        return 42

    def __cmp__(self, other):
        maybe_mutate()   # The point of the test.
        return cmp(self.i, other.i)

    def __eq__(self, other):
        maybe_mutate()   # The point of the test.
        return self.i == other.i

    def __repr__(self):
        return "Horrid(%d)" % self.i
+
+# Fill dict d with numentries (Horrid(i), Horrid(j)) key-value pairs,
+# where i and j are selected at random from the candidates list.
+# Return d.keys() after filling.
+
def fill_dict(d, candidates, numentries):
    """Clear d, insert numentries random Horrid->Horrid pairs, and return
    d.keys().

    The single-statement assignment is kept deliberately: in
    ``d[key_expr] = value_expr`` Python evaluates the value expression
    first, so splitting it up would reorder the RNG draws.
    """
    d.clear()
    for _ in xrange(numentries):
        d[Horrid(random.choice(candidates))] = \
            Horrid(random.choice(candidates))
    return d.keys()
+
+# Test one pair of randomly generated dicts, each with n entries.
+# Note that dict comparison is trivial if they don't have the same number
+# of entries (then the "shorter" dict is instantly considered to be the
+# smaller one, without even looking at the entries).
+
def test_one(n):
    # Build two fresh n-entry dicts, then compare them repeatedly while
    # Horrid comparisons mutate them, until a mutation makes the sizes
    # differ (or dict1 empties) -- exercising dict compare vs. mutation.
    global mutate, dict1, dict2, dict1keys, dict2keys

    # Fill the dicts without mutating them.
    mutate = 0
    dict1keys = fill_dict(dict1, range(n), n)
    dict2keys = fill_dict(dict2, range(n), n)

    # Enable mutation, then compare the dicts so long as they have the
    # same size.
    mutate = 1
    if verbose:
        print "trying w/ lengths", len(dict1), len(dict2),
    while dict1 and len(dict1) == len(dict2):
        if verbose:
            print ".",
        # Alternate randomly between three-way compare and equality;
        # the results are irrelevant, only the side effects matter.
        if random.random() < 0.5:
            c = cmp(dict1, dict2)
        else:
            c = dict1 == dict2
    if verbose:
        print
+
+# Run test_one n times.  At the start (before the bugs were fixed), 20
+# consecutive runs of this test each blew up on or before the sixth time
+# test_one was run.  So n doesn't have to be large to get an interesting
+# test.
+# OTOH, calling with large n is also interesting, to ensure that the fixed
+# code doesn't hold on to refcounts *too* long (in which case memory would
+# leak).
+
def test(n):
    """Run test_one() n times, each with a random dict size in [1, 100)."""
    for _ in xrange(n):
        test_one(random.randrange(1, 100))

# See last comment block for clues about good values for n.
test(20)
+
+##########################################################################
+# Another segfault bug, distilled by Michael Hudson from a c.l.py post.
+
class Child:
    """On any missing-attribute lookup, stamps nine attributes onto its
    parent (mutating the parent's __dict__ mid-lookup) before delegating
    the lookup to the parent."""
    def __init__(self, parent):
        # Store via __dict__ directly so __getattr__ is not triggered.
        self.__dict__['parent'] = parent
    def __getattr__(self, attr):
        # Grow the parent's instance dict while an attribute lookup is
        # in flight -- the crash this section regression-tests.
        for name in 'abcdefghi':
            setattr(self.parent, name, 1)
        return getattr(self.parent, attr)
+
class Parent:
    def __init__(self):
        # Hand ourselves to a Child; any missing-attribute access on this
        # object (via the Child) then fans out writes back into our dict.
        self.a = Child(self)
+
# Hard to say what this will print!  May vary from time to time.  But
# we're specifically trying to test the tp_print slot here, and this is
# the clearest way to do it.  We print the result to a temp file so that
# the expected-output file doesn't need to change.

# (print >> file exercises tp_print; the output itself is discarded.)
f = open(TESTFN, "w")
print >> f, Parent().__dict__
f.close()
os.unlink(TESTFN)
+
##########################################################################
# Another segfault bug, distilled by Michael Hudson from a c.l.py post.

dict = {}

# Force dict to malloc its table.
for i in range(1, 10):
    dict[i] = i

f = open(TESTFN, "w")

class Machiavelli:
    def __repr__(self):
        # Empty the dict out from under whoever is repr'ing it.
        dict.clear()

        # Michael sez:  "doesn't crash without this.  don't know why."
        # Tim sez:  "luck of the draw; crashes with or without for me."
        print >> f

        return `"machiavelli"`

    def __hash__(self):
        return 0

dict[Machiavelli()] = Machiavelli()

# str() of the dict repr's its entries, triggering the clear() above.
print >> f, str(dict)
f.close()
os.unlink(TESTFN)
del f, dict
+
+
##########################################################################
# And another core-dumper from Michael Hudson.

dict = {}

# let's force dict to malloc its table
for i in range(1, 10):
    dict[i] = i

class Machiavelli2:
    def __eq__(self, other):
        # Empty the dict during the lookup's key comparison.
        dict.clear()
        return 1

    def __hash__(self):
        return 0

dict[Machiavelli2()] = Machiavelli2()

# The lookup compares keys via __eq__, which clears the dict mid-probe.
try:
    dict[Machiavelli2()]
except KeyError:
    pass

del dict
+
##########################################################################
# And another core-dumper from Michael Hudson.

dict = {}

# let's force dict to malloc its table
for i in range(1, 10):
    dict[i] = i

class Machiavelli3:
    def __init__(self, id):
        self.id = id

    def __eq__(self, other):
        # Clear the dict only when comparing "equal" ids -- i.e. exactly
        # while the probe that found us is still walking the table.
        if self.id == other.id:
            dict.clear()
            return 1
        else:
            return 0

    def __repr__(self):
        return "%s(%s)"%(self.__class__.__name__, self.id)

    def __hash__(self):
        return 0

dict[Machiavelli3(1)] = Machiavelli3(0)
dict[Machiavelli3(2)] = Machiavelli3(0)

f = open(TESTFN, "w")
try:
    try:
        print >> f, dict[Machiavelli3(2)]
    except KeyError:
        pass
finally:
    f.close()
    os.unlink(TESTFN)

del dict
del dict1, dict2, dict1keys, dict2keys



More information about the Pypy-commit mailing list