[pypy-svn] r13096 - pypy/branch/pycompiler/module/recparser/test

adim at codespeak.net
Mon Jun 6 13:14:08 CEST 2005


Author: adim
Date: Mon Jun  6 13:14:07 2005
New Revision: 13096

Modified:
   pypy/branch/pycompiler/module/recparser/test/test_pytokenizer.py
Log:
fixed tests to match the new lexer API: lexer.next() now returns Token objects instead of (name, value) tuples
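
The diff below relies on three properties of the Token class imported
from pypy.module.recparser.grammar: it is built as Token(name, value),
exposes a .name attribute, and compares by value with == / != (the
assertions compare freshly built Token instances against the lexer's
output, and one test uses !=). The real implementation lives in the
grammar module and may differ; a minimal sketch of the assumed
interface:

    class Token:
        """Assumed shape of grammar.Token: a (name, value) pair
        compared by value, as the rewritten assertions require."""
        def __init__(self, name, value):
            self.name = name    # token kind, e.g. 'NUMBER', 'NAME', '['
            self.value = value  # matched text, or None for punctuation

        def __eq__(self, other):
            return (isinstance(other, Token) and
                    self.name == other.name and
                    self.value == other.value)

        def __ne__(self, other):
            # must be defined explicitly on this Python version:
            # != does not fall back to the negation of __eq__
            return not self == other

        def __repr__(self):
            return 'Token(%r, %r)' % (self.name, self.value)

Note that __ne__ matters here: test_numbers asserts with != against
BAD_NUMBERS, which would silently compare object identities if only
__eq__ were defined.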


Modified: pypy/branch/pycompiler/module/recparser/test/test_pytokenizer.py
==============================================================================
--- pypy/branch/pycompiler/module/recparser/test/test_pytokenizer.py	(original)
+++ pypy/branch/pycompiler/module/recparser/test/test_pytokenizer.py	Mon Jun  6 13:14:07 2005
@@ -1,14 +1,15 @@
 from pypy.module.recparser.pythonlexer import PythonSource, py_number, \
      g_symdef, g_string, py_name, py_punct
+from pypy.module.recparser.grammar import Token
 
 def parse_source(source):
     """returns list of parsed tokens"""
     lexer = PythonSource(source)
     tokens = []
-    last_token = ''
-    while last_token != 'ENDMARKER':
-        last_token, value = lexer.next()
-        tokens.append((last_token, value))
+    last_token = Token(None, None)
+    while last_token.name != 'ENDMARKER':
+        last_token = lexer.next()
+        tokens.append(last_token)
     return tokens
 
 ## class TestSuite:
@@ -53,27 +54,30 @@
     s = """['a'
     ]"""
     tokens = parse_source(s)
-    assert tokens == [('[', None), ('STRING', "'a'"), (']', None),
-                      ('NEWLINE', ''), ('ENDMARKER', None)]
+    assert tokens == [Token('[', None), Token('STRING', "'a'"),
+                      Token(']', None), Token('NEWLINE', ''),
+                      Token('ENDMARKER', None)]
 
 def test_numbers():
     """make sure all kind of numbers are correctly parsed"""
     for number in NUMBERS:
-        assert parse_source(number)[0] == ('NUMBER', number)
+        assert parse_source(number)[0] == Token('NUMBER', number)
         neg = '-%s' % number
-        assert parse_source(neg)[:2] == [('-', None), ('NUMBER', number)]
+        assert parse_source(neg)[:2] == [Token('-', None),
+                                         Token('NUMBER', number)]
     for number in BAD_NUMBERS:
-        assert parse_source(number)[0] != ('NUMBER', number)
+        assert parse_source(number)[0] != Token('NUMBER', number)
 
 def test_hex_number():
     """basic pasrse"""
     tokens = parse_source("a = 0x12L")
-    assert tokens == [('NAME', 'a'), ('=', None), ('NUMBER', '0x12L'),
-                      ('NEWLINE', ''), ('ENDMARKER', None)]
+    assert tokens == [Token('NAME', 'a'), Token('=', None),
+                      Token('NUMBER', '0x12L'), Token('NEWLINE', ''),
+                      Token('ENDMARKER', None)]
 
 def test_punct():
     """make sure each punctuation is correctly parsed"""
     for pstr in PUNCTS:
         tokens = parse_source(pstr)
-        assert tokens[0][0] == pstr
+        assert tokens[0].name == pstr
 


