[pypy-svn] r11427 - pypy/dist/pypy/module/parser/recparser/test

adim at codespeak.net
Mon Apr 25 17:21:08 CEST 2005


Author: adim
Date: Mon Apr 25 17:21:08 2005
New Revision: 11427

Modified:
   pypy/dist/pypy/module/parser/recparser/test/test_pytokenizer.py
Log:
use py.test instead of unittest
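
For reference, the shape of the conversion (a minimal sketch reusing names
from this diff, not the verbatim file contents): py.test collects plain
classes and functions by naming convention (Test*/test_*), and bare assert
statements replace the unittest.TestCase helper methods, so both the base
class and the unittest.main() runner boilerplate can be dropped:

    # unittest style (before): TestCase subclass plus explicit runner
    import unittest

    class PythonSourceTC(unittest.TestCase):
        def test_numbers(self):
            self.assertEquals(parse_source('1.23')[0], ('NUMBER', '1.23'))

    if __name__ == '__main__':
        unittest.main()

    # py.test style (after): plain class, plain asserts, no runner needed
    class TestSuite:
        def test_numbers(self):
            assert parse_source('1.23')[0] == ('NUMBER', '1.23')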


Modified: pypy/dist/pypy/module/parser/recparser/test/test_pytokenizer.py
==============================================================================
--- pypy/dist/pypy/module/parser/recparser/test/test_pytokenizer.py	(original)
+++ pypy/dist/pypy/module/parser/recparser/test/test_pytokenizer.py	Mon Apr 25 17:21:08 2005
@@ -1,43 +1,8 @@
-import unittest
 from python.lexer import PythonSource, py_number, g_symdef, g_string, py_name, \
-     py_comment, py_ws, py_punct
-
-class TokenValPair(tuple):
-    token = 'Override me'
-    def __new__(cls, val = None):
-        return tuple.__new__(cls, (cls.token, val))
-
-TokenMap = {
-    'Equals' : "=",
-    'NonePair' : None,
-    }
-ctx = globals()
-for classname in ('Number', 'String', 'EndMarker', 'NewLine', 'Dedent', 'Name',
-                  'Equals', 'NonePair', 'SymDef', 'Symbol'):
-    classdict = {'token' : TokenMap.get(classname, classname.upper())}
-    ctx[classname] = type(classname, (TokenValPair,), classdict)
-
-
-PUNCTS = [ '>=', '<>', '!=', '<', '>', '<=', '==', '*=',
-           '//=', '%=', '^=', '<<=', '**=', '|=',
-           '+=', '>>=', '=', '&=', '/=', '-=', ',', '^',
-           '>>', '&', '+', '*', '-', '/', '.', '**',
-           '%', '<<', '//', '|', ')', '(', ';', ':',
-           '@', '[', ']', '`', '{', '}',
-           ]
-
-
-BAD_SYNTAX_STMTS = [
-    # "yo yo",
-    """for i in range(10):
-    print i
-  print 'bad dedent here'""",
-    """for i in range(10):
-  print i
-    print 'Bad indentation here'""",
-    ]
+     py_punct
 
 def parse_source(source):
+    """returns list of parsed tokens"""
     lexer = PythonSource(source)
     tokens = []
     last_token = ''
@@ -46,24 +11,31 @@
         tokens.append((last_token, value))
     return tokens
 
-
-NUMBERS = [
-    '1', '1.23', '1.', '0',
-    '1L', '1l',
-    '0x12L', '0x12l', '0X12', '0x12',
-    '1j', '1J',
-    '1e2', '1.2e4',
-    '0.1', '0.', '0.12', '.2',
-    ]
-
-BAD_NUMBERS = [
-    'j', '0xg', '0xj', '0xJ',
-    ]
-
-class PythonSourceTC(unittest.TestCase):
-    """ """
-    def setUp(self):
-        pass
+class TestSuite:
+    """Tokenizer test suite"""
+    PUNCTS = [
+        # Every punctuation token handled by the tokenizer should be listed here
+        '>=', '<>', '!=', '<', '>', '<=', '==', '*=',
+        '//=', '%=', '^=', '<<=', '**=', '|=',
+        '+=', '>>=', '=', '&=', '/=', '-=', ',', '^',
+        '>>', '&', '+', '*', '-', '/', '.', '**',
+        '%', '<<', '//', '|', ')', '(', ';', ':',
+        '@', '[', ']', '`', '{', '}',
+        ]
+
+    NUMBERS = [
+        # Every supported form of number literal should be listed here
+        '1', '1.23', '1.', '0',
+        '1L', '1l',
+        '0x12L', '0x12l', '0X12', '0x12',
+        '1j', '1J',
+        '1e2', '1.2e4',
+        '0.1', '0.', '0.12', '.2',
+        ]
+
+    BAD_NUMBERS = [
+        'j', '0xg', '0xj', '0xJ',
+        ]
 
     def test_empty_string(self):
         """make sure defined regexps don't match empty string"""
@@ -74,38 +46,35 @@
                  'punct'   : py_punct,
                  }
         for label, rgx in rgxes.items():
-            self.assert_(rgx.match('') is None, '%s matches empty string' % label)
+            assert rgx.match('') is None, '%s matches empty string' % label
 
     def test_several_lines_list(self):
         """tests list definition on several lines"""
         s = """['a'
         ]"""
         tokens = parse_source(s)
-        self.assertEquals(tokens, [('[', None), ('STRING', "'a'"), (']', None),
-                                   ('NEWLINE', ''), ('ENDMARKER', None)])
+        assert tokens == [('[', None), ('STRING', "'a'"), (']', None),
+                          ('NEWLINE', ''), ('ENDMARKER', None)]
 
     def test_numbers(self):
         """make sure all kind of numbers are correctly parsed"""
-        for number in NUMBERS:
-            self.assertEquals(parse_source(number)[0], ('NUMBER', number))
+        for number in self.NUMBERS:
+            assert parse_source(number)[0] == ('NUMBER', number)
             neg = '-%s' % number
-            self.assertEquals(parse_source(neg)[:2],
-                              [('-', None), ('NUMBER', number)])
-        for number in BAD_NUMBERS:
-            self.assertNotEquals(parse_source(number)[0], ('NUMBER', number))
-    
+            assert parse_source(neg)[:2] == [('-', None), ('NUMBER', number)]
+        for number in self.BAD_NUMBERS:
+            assert parse_source(number)[0] != ('NUMBER', number)
+
     def test_hex_number(self):
+        """basic pasrse"""
         tokens = parse_source("a = 0x12L")
-        self.assertEquals(tokens, [('NAME', 'a'), ('=', None),
-                                   ('NUMBER', '0x12L'), ('NEWLINE', ''),
-                                   ('ENDMARKER', None)])
-        
-    def test_punct(self):
-        for pstr in PUNCTS:
-            tokens = parse_source( pstr )
-            self.assertEqual( tokens[0][0], pstr )
+        assert tokens == [('NAME', 'a'), ('=', None), ('NUMBER', '0x12L'),
+                          ('NEWLINE', ''), ('ENDMARKER', None)]
 
+    def test_punct(self):
+        """make sure each punctuation is correctly parsed"""
+        for pstr in self.PUNCTS:
+            tokens = parse_source(pstr)
+            assert tokens[0][0] == pstr
 
-if __name__ == '__main__':
-    unittest.main()
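
With the unittest boilerplate removed, the file is no longer meant to be
executed directly; a typical invocation (assuming the py lib's command-line
tool is installed) would be:

    $ py.test test_pytokenizer.py

py.test discovers the TestSuite class and its test_* methods by naming
convention, which is why no runner code remains in the module.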
 


