[Python-checkins] cpython (2.7): Issue #14904: Made test_unicode_repr_oflw use less memory.

serhiy.storchaka python-checkins at python.org
Mon Mar 30 08:28:07 CEST 2015


https://hg.python.org/cpython/rev/5bea08f80e62
changeset:   95277:5bea08f80e62
branch:      2.7
parent:      95266:6ef2cacec2e9
user:        Serhiy Storchaka <storchaka at gmail.com>
date:        Mon Mar 30 09:27:20 2015 +0300
summary:
  Issue #14904: Made test_unicode_repr_oflw use less memory.
Corrected memory requirements in other bigmem tests.
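
For context, the corrected memuse figures follow from the per-character
storage cost of the unicode build.  A rough sketch of the accounting, not
part of the patch (requested_bytes is a made-up helper; _2G mirrors the
constant in test_support):

    import sys

    # Wide (UCS-4) builds store 4 bytes per code point, narrow (UCS-2)
    # builds store 2; plain byte strings need 1 byte per character.
    character_size = 4 if sys.maxunicode > 0xFFFF else 2

    _2G = 2 * 1024 ** 3  # matches test_support._2G

    def requested_bytes(size, memuse):
        # A bigmem test is expected to touch roughly size * memuse bytes.
        return size * memuse

    # e.g. test_encode keeps the unicode source (character_size bytes per
    # char) and its UTF-8 output (at most 4 bytes per char) alive at once:
    print requested_bytes(_2G + 2, character_size + 4)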

files:
  Lib/test/test_bigmem.py |  77 ++++++++++++++++------------
  1 files changed, 43 insertions(+), 34 deletions(-)


diff --git a/Lib/test/test_bigmem.py b/Lib/test/test_bigmem.py
--- a/Lib/test/test_bigmem.py
+++ b/Lib/test/test_bigmem.py
@@ -33,6 +33,11 @@
 #    memuse-per-size should remain sane (less than a few thousand); if your
 #    test uses more, adjust 'size' upward, instead.
 
+if test_support.have_unicode:
+    character_size = 4 if sys.maxunicode > 0xFFFF else 2
+else:
+    character_size = 1
+
 class StrTest(unittest.TestCase):
     @bigmemtest(minsize=_2G, memuse=2)
     def test_capitalize(self, size):
@@ -54,7 +59,8 @@
         self.assertEqual(s[lpadsize:-rpadsize], SUBSTR)
         self.assertEqual(s.strip(), SUBSTR.strip())
 
-    @precisionbigmemtest(size=_2G - 1, memuse=1)
+    @test_support.requires_unicode
+    @precisionbigmemtest(size=_2G - 1, memuse=character_size)
     def test_center_unicode(self, size):
         SUBSTR = u' abc def ghi'
         try:
@@ -81,7 +87,8 @@
         self.assertEqual(s.count('i'), 1)
         self.assertEqual(s.count('j'), 0)
 
-    @bigmemtest(minsize=_2G + 2, memuse=3)
+    @test_support.requires_unicode
+    @bigmemtest(minsize=_2G + 2, memuse=1 + character_size)
     def test_decode(self, size):
         s = '.' * size
         self.assertEqual(len(s.decode('utf-8')), size)
@@ -93,45 +100,30 @@
         s = c * size
         self.assertEqual(len(s.encode(enc)), expectedsize)
 
-    @bigmemtest(minsize=_2G + 2, memuse=3)
+    @test_support.requires_unicode
+    @bigmemtest(minsize=_2G + 2, memuse=character_size + 4)
     def test_encode(self, size):
-        return self.basic_encode_test(size, 'utf-8')
+        self.basic_encode_test(size, 'utf-8')
 
-    @precisionbigmemtest(size=_4G // 6 + 2, memuse=2)
+    @test_support.requires_unicode
+    @precisionbigmemtest(size=_4G // 6 + 2, memuse=character_size + 6)
     def test_encode_raw_unicode_escape(self, size):
-        try:
-            return self.basic_encode_test(size, 'raw_unicode_escape')
-        except MemoryError:
-            pass # acceptable on 32-bit
+        self.basic_encode_test(size, 'raw_unicode_escape')
 
-    @precisionbigmemtest(size=_4G // 5 + 70, memuse=3)
+    @test_support.requires_unicode
+    @precisionbigmemtest(size=_4G // 5 + 70, memuse=character_size + 8)
     def test_encode_utf7(self, size):
-        try:
-            return self.basic_encode_test(size, 'utf7')
-        except MemoryError:
-            pass # acceptable on 32-bit
+        self.basic_encode_test(size, 'utf7')
 
-    @precisionbigmemtest(size=_4G // 4 + 5, memuse=6)
+    @test_support.requires_unicode
+    @precisionbigmemtest(size=_4G // 4 + 5, memuse=character_size + 4)
     def test_encode_utf32(self, size):
-        try:
-            return self.basic_encode_test(size, 'utf32', expectedsize=4*size+4)
-        except MemoryError:
-            pass # acceptable on 32-bit
+        self.basic_encode_test(size, 'utf32', expectedsize=4*size+4)
 
+    @test_support.requires_unicode
     @precisionbigmemtest(size=_2G-1, memuse=4)
     def test_decodeascii(self, size):
-        return self.basic_encode_test(size, 'ascii', c='A')
-
-    @precisionbigmemtest(size=_4G // 5, memuse=6+2)
-    def test_unicode_repr_oflw(self, size):
-        self.skipTest("test crashes - see issue #14904")
-        try:
-            s = u"\uAAAA"*size
-            r = repr(s)
-        except MemoryError:
-            pass # acceptable on 32-bit
-        else:
-            self.assertTrue(s == eval(r))
+        self.basic_encode_test(size, 'ascii', c='A')
 
     @bigmemtest(minsize=_2G, memuse=2)
     def test_endswith(self, size):
@@ -516,10 +508,27 @@
         self.assertEqual(s.count('\\'), size)
         self.assertEqual(s.count('0'), size * 2)
 
-    @bigmemtest(minsize=2**32 // 5, memuse=6+2)
+    @test_support.requires_unicode
+    @bigmemtest(minsize=2**32 // 6, memuse=character_size + 6)
     def test_unicode_repr(self, size):
-        s = u"\uAAAA" * size
-        self.assertTrue(len(repr(s)) > size)
+        s = unichr(0xABCD) * size
+        try:
+            r = repr(s)
+            self.assertEqual(len(r), 3 + 6 * size)
+            self.assertTrue(r.endswith(r"\uabcd'"), r[-10:])
+        finally:
+            s = r = None
+
+    @test_support.requires_unicode
+    @precisionbigmemtest(size=_4G // 6 + 1, memuse=character_size + 6)
+    def test_unicode_repr_oflw(self, size):
+        s = unichr(0xABCD) * size
+        try:
+            r = repr(s)
+            self.assertEqual(len(r), 3 + 6 * size)
+            self.assertTrue(r.endswith(r"\uabcd'"), r[-10:])
+        finally:
+            r = s = None
 
     # This test is meaningful even with size < 2G, as long as the
     # doubled string is > 2G (but it tests more if both are > 2G :)

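The expected repr length in the rewritten tests follows from Python 2's
unicode repr format: two characters for the u' prefix, one for the closing
quote, and six for each \uabcd escape, hence 3 + 6 * size.  That factor of
six is also why the test sizes are scaled by // 6.  A quick interactive
check (not part of the patch):

    >>> s = unichr(0xABCD) * 3
    >>> repr(s)
    "u'\\uabcd\\uabcd\\uabcd'"
    >>> len(repr(s)) == 3 + 6 * 3
    True
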
-- 
Repository URL: https://hg.python.org/cpython

